Loading packages

# Chunk setup: echo all code in the rendered document.
knitr::opts_chunk$set(echo = TRUE)
# pacman::p_load() installs (if missing) and attaches every listed package.
pacman::p_load(tidyverse, 
               here,         # project-root-relative file paths
               metafor,      # multilevel meta-analytic models (rma.mv)
               emmeans,
               orchaRd,      # orchard plots, i2_ml()/r2_ml() helpers
               MCMCglmm,
               corpcor,
               clubSandwich, # impute_covariance_matrix() for sampling VCVs
               MuMIn, 
               patchwork,
               naniar,       # vis_miss() missing-data visualisation
               GoodmanKruskal,
               networkD3,
               ggplot2,
               ggalluvial,   # alluvial diagrams
               ggthemr,
               cowplot)
# needed for model selection using MuMin within metafor
eval(metafor:::.MuMIn)

Loading data and functions

# Read the full extracted dataset (one row per effect size).
dat <- read_csv(here("Data","Full_data.csv"))
# Load custom functions to compute effect sizes (effect_set, effect_set2).
# The cleaned-up version supersedes the original functions.R, kept for reference:
#source(here("R/functions.R")) 
source(here("R/functions_cleanedup.R")) 

Data organisation

Getting effect sizes from the custom functions, ‘flipping’ effect sizes so that higher values always mean individuals perform better at learning/memory, and shifting negative values to positive because lnRR cannot use negative values

# Remove the one study reporting negative mean values: lnRR uses a log of
# the ratio of means, so non-positive means cannot be used.
dat <- droplevels(dat[dat$First_author != "Wang", ])

# Compute effect sizes (lnRR / SMD and their variances) from the group
# means, SDs and sample sizes via the custom effect_set() function.
effect_size <- effect_set(CC_n = "CC_n", CC_mean = "CC_mean", CC_SD = "CC_SD",
                          EC_n = "EC_n", EC_mean = "EC_mean" , EC_SD ="EC_SD",
                          CS_n = "CS_n", CS_mean = "CS_mean", CS_SD = "CS_SD",
                          ES_n = "ES_n", ES_mean = "ES_mean", ES_SD = "ES_SD",
                          data = dat)
# 'Pure' effect sizes (alternative parameterisation) via effect_set2().
effect_size2 <- effect_set2(CC_n = "CC_n", CC_mean = "CC_mean", CC_SD = "CC_SD",
                          EC_n = "EC_n", EC_mean = "EC_mean" , EC_SD ="EC_SD",
                          CS_n = "CS_n", CS_mean = "CS_mean", CS_SD = "CS_SD",
                          ES_n = "ES_n", ES_mean = "ES_mean", ES_SD = "ES_SD",
                          data = dat) 

# Bind effect sizes onto the data and keep only rows where every effect
# size could be computed (complete cases).
dim(dat)
full_info <- which(complete.cases(effect_size))
dat_effect <- cbind(dat, effect_size, effect_size2)
names(dat_effect)
dat <- dat_effect[full_info, ]  # was assigned twice in the original; once suffices

dim(dat_effect)
dimentions <- dim(dat)  # NOTE(review): name is a typo for "dimensions"; kept in case later code references it

# Flip 'lower is better' effect sizes so that larger values always mean
# better learning/memory performance.
# Response_direction == 2 codes outcomes where a higher raw score = worse,
# so those effect sizes are negated. Note ifelse() already returns NA
# wherever `direction` is NA, so no explicit NA branch is needed (the
# original nested ifelse(is.na(...), NA, ...) was dead code).
flip_direction <- function(es, direction) {
  ifelse(direction == 2, -es, es)
}

# Main lnRR effect sizes.
dat$lnRR_Ea  <- flip_direction(dat$lnRR_E,  dat$Response_direction)
dat$lnRR_Sa  <- flip_direction(dat$lnRR_S,  dat$Response_direction)
dat$lnRR_ESa <- flip_direction(dat$lnRR_ES, dat$Response_direction)

# 'Pure' lnRR effect sizes.
dat$lnRR_E2a  <- flip_direction(dat$lnRR_E2,  dat$Response_direction)
dat$lnRR_S2a  <- flip_direction(dat$lnRR_S2,  dat$Response_direction)
dat$lnRR_ES2a <- flip_direction(dat$lnRR_ES2, dat$Response_direction)
dat$lnRR_E3a  <- flip_direction(dat$lnRR_E3,  dat$Response_direction)
dat$lnRR_S3a  <- flip_direction(dat$lnRR_S3,  dat$Response_direction)

# SMD effect sizes.
dat$SMD_Ea  <- flip_direction(dat$SMD_E,  dat$Response_direction)
dat$SMD_Sa  <- flip_direction(dat$SMD_S,  dat$Response_direction)
dat$SMD_ESa <- flip_direction(dat$SMD_ES, dat$Response_direction)

#rounding down integer sample sizes 
dat$CC_n <- floor(dat$CC_n)
dat$EC_n <- floor(dat$EC_n)
dat$CS_n <- floor(dat$CS_n)
dat$ES_n <- floor(dat$CS_n)

#assigning human readable terms
dat <- dat %>% mutate(Type_learning = case_when(Type_learning == 1 ~ "Habituation",
                                                Type_learning == 2 ~ "Conditioning",
                                                Type_learning == 3 ~ "Recognition", 
                                                Type_learning == 4 ~ "Unclear"),
                      Learning_vs_memory = case_when(Learning_vs_memory == 1 ~ "Learning",
                                                     Learning_vs_memory == 2 ~ "Memory", 
                                                     Learning_vs_memory == 1 ~ "Unclear"),
                      Appetitive_vs_aversive = case_when(Appetitive_vs_aversive == 1 ~"Appetitive",
                                                         Appetitive_vs_aversive == 2 ~ "Aversive",
                                                         Appetitive_vs_aversive == 3 ~ "Not applicable",
                                                         Appetitive_vs_aversive == 4 ~ "Unclear"),
                      Type_stress_exposure = case_when(Type_stress_exposure == 1 ~ "Density",
                                                       Type_stress_exposure == 2 ~ "Scent",
                                                       Type_stress_exposure == 3 ~ "Shock",
                                                       Type_stress_exposure == 4 ~ "Exertion",
                                                       Type_stress_exposure == 5 ~ "Restraint",
                                                       Type_stress_exposure == 6 ~ "MS",
                                                       Type_stress_exposure == 7 ~ "Circadian rhythm",
                                                       Type_stress_exposure == 8 ~ "Noise",
                                                       Type_stress_exposure == 9 ~ "Other",
                                                       Type_stress_exposure == 10 ~ "Combination",
                                                       Type_stress_exposure == 11 ~ "unclear"), 
                      Age_stress_exposure = case_when(Age_stress_exposure == 1 ~ "Prenatal",
                                                      Age_stress_exposure == 2 ~ "Early postnatal",
                                                      Age_stress_exposure == 3 ~ "Adolescent",
                                                      Age_stress_exposure == 4 ~ "Adult",
                                                      Age_stress_exposure == 5 ~ "Unclear"),
                      Stress_duration = case_when(Stress_duration == 1 ~ "Acute",
                                                  Stress_duration == 2 ~ "Chronic",
                                                  Stress_duration == 3 ~ "Intermittent",
                                                  Stress_duration == 4 ~ "Unclear"),
                      EE_social = case_when(EE_social == 1 ~ "Social",
                                            EE_social== 2 ~ "Non-social", 
                                            EE_social == 3 ~ "Unclear"), 
                      EE_exercise = case_when(EE_exercise == 1 ~ "Exercise", 
                                              EE_exercise == 2 ~ "No exercise"),
                      Age_EE_exposure = case_when(Age_EE_exposure == 1 ~ "Prenatal", 
                                                  Age_EE_exposure == 2 ~ "Early postnatal",
                                                  Age_EE_exposure == 3 ~ "Adolescent", 
                                                  Age_EE_exposure == 4 ~ "Adult",
                                                  Age_EE_exposure == 5 ~ "Unclear"),
                      Stress_vs_EE_timing = case_when(Stress_vs_EE_timing == 1 ~ "Stress first",
                                                      Stress_vs_EE_timing == 2 ~ "Enrichment first",
                                                      Stress_vs_EE_timing == 3 ~ "Concurrently", 
                                                      Stress_vs_EE_timing == 4 ~ "Unclear"))

#making variance VCVs
VCV_E <- impute_covariance_matrix(vi = dat$lnRRV_E, cluster = dat$Study_ID, r = 0.5)
VCV_S <- impute_covariance_matrix(vi = dat$lnRRV_S, cluster = dat$Study_ID, r = 0.5)
VCV_ES <- impute_covariance_matrix(vi = dat$lnRRV_ES, cluster = dat$Study_ID, r = 0.5)

VCV_Ea <- impute_covariance_matrix(vi = dat$SMDV_E, cluster = dat$Study_ID, r = 0.5)
VCV_Sa <- impute_covariance_matrix(vi = dat$SMDV_S, cluster = dat$Study_ID, r = 0.5)
VCV_ESa <- impute_covariance_matrix(vi = dat$SMDV_ES, cluster = dat$Study_ID, r = 0.5)

Data exploration

General

# Basic dataset description (lines starting "## " are knitr output).
# Number of effect sizes
length(unique(dat$ES_ID))  
## [1] 92
# Number of studies
length(unique(dat$Study_ID))
## [1] 30
# Range of publication years
min(dat$Year_published) 
## [1] 2006
max(dat$Year_published)
## [1] 2021

Explore associations among predictor variables

# Visualise which columns contain missing values (naniar::vis_miss).
vis_miss(dat) +
  theme(plot.title = element_text(hjust = 0.5, vjust = 3), 
        plot.margin = margin(t = 0.5, r = 3, b = 1, l = 1, unit = "cm")) +
  ggtitle("Missing data in the selected predictors") # no missing values in selected predictors

# Use Goodman and Kruskal's tau measure of association between categorical predictor variables
# (function from package GoodmanKruskal: https://cran.r-project.org/web/packages/GoodmanKruskal/vignettes/GoodmanKruskal.html)
# Currently disabled:
#GKmatrix <- GKtauDataframe(subset(dat, select = c("Sex", "Type_learning", "Learning_vs_memory", #"Appetitive_vs_aversive",  "Type_stress_exposure", "Age_stress_exposure", "Stress_duration", #"EE_social_HR", "EE_exercise", "Age_EE_exposure", "Stress_vs_EE_timing", "Age_assay")))
#plot(GKmatrix)

# Simple pairwise contingency tables to eyeball moderator confounding.
table(dat$Type_learning, dat$Appetitive_vs_aversive) 
table(dat$Age_stress_exposure, dat$Age_EE_exposure) 
table(dat$Type_stress_exposure, dat$Age_stress_exposure)
table(dat$Type_stress_exposure, dat$Age_assay)
table(dat$Type_stress_exposure, dat$Stress_duration)

Alluvial diagrams

Species and strain

# create a frequency table for Species and Strain variables
freq_1 <- as.data.frame(table(dat$Common_species, dat$Strain)) %>% rename(Species = Var1, Strain = Var2)

is_alluvia_form(as.data.frame(freq_1), axes = 1:2, silent = TRUE)

ggplot(data = freq_1,
  aes(axis1 = Species, axis2 = Strain, y = Freq)) +
  geom_alluvium(aes(fill = Strain)) +
  geom_stratum(aes(fill = Species)) +
  geom_text(stat = "stratum", aes(label = after_stat(stratum))) +
#theme_minimal() +
  theme_void() +
  theme(legend.position = "none",
        plot.title = element_text(hjust = 0.5, vjust = 3),
        axis.title.x = element_text(),
        axis.text.x = element_text(face="bold")) +
  scale_x_discrete(limits = c("Species", "Strain"), expand = c(0.15, 0.05), position = "top") +
  ggtitle("Species and strains used (set1)")

Species, strain and sex

# create frequency table for Sex, Species and Strain
freq_2 <- as.data.frame(table(dat$Sex, dat$Common_species, dat$Strain)) %>% rename(Sex = Var1, Species = Var2, Strain = Var3)
is_alluvia_form(as.data.frame(freq_2), axes = 1:3, silent = TRUE)

ggplot(data = freq_2,
  aes(axis1 = Sex, axis2 = Species, axis3 = Strain, y = Freq)) +
  geom_alluvium(aes(fill = Sex)) +
  geom_flow() +
  geom_stratum(aes(fill = Sex)) +
  geom_text(stat = "stratum", aes(label = after_stat(stratum))) +
  #theme_minimal() +
  theme_void() +
  theme(legend.position = "none",
        plot.title = element_text(hjust = 0.5, vjust = 3),
        axis.title.x = element_text(),
        axis.text.x = element_text(face="bold")) +
  scale_x_discrete(limits = c("Sex", "Species", "Strain"), expand = c(0.15, 0.05), position = "top") +
  ggtitle("Species, strains and sex used (set2)")
## Warning: The `.dots` argument of `group_by()` is deprecated as of dplyr 1.0.0.

freq_2 %>% filter(Freq != 0) %>% arrange(desc(Freq)) #collapesd table of values, without 0s

Age at stress and EE

# Frequency table: age at stress exposure vs age at enrichment exposure.
freq_ages1 <- as.data.frame(table(dat$Age_stress_exposure, dat$Age_EE_exposure)) %>% rename(Age_stress = Var1, Age_EE = Var2)
is_alluvia_form(as.data.frame(freq_ages1), axes = 1:2, silent = TRUE)

# Alluvial diagram: life stage at stress vs life stage at enrichment.
ggplot(data = freq_ages1,
  aes(axis1 = Age_stress, axis2 = Age_EE, y = Freq)) +
  geom_alluvium(aes(fill = Age_EE)) +
  geom_stratum(aes(fill = Age_stress)) +
  geom_text(stat = "stratum", aes(label = after_stat(stratum))) +
#theme_minimal() +
  theme_void() +
  theme(legend.position = "none",
        plot.title = element_text(hjust = 0.5, vjust = 3),
        axis.title.x = element_text(),
        axis.text.x = element_text(face="bold")) +
  scale_x_discrete(limits = c("Stress", "Enrichment"), expand = c(0.15, 0.05), position = "top") +
  ggtitle("Life stages used (set1)")
## Warning in to_lodes_form(data = data, axes = axis_ind, discern =
## params$discern): Some strata appear at multiple axes.

## Warning in to_lodes_form(data = data, axes = axis_ind, discern =
## params$discern): Some strata appear at multiple axes.

## Warning in to_lodes_form(data = data, axes = axis_ind, discern =
## params$discern): Some strata appear at multiple axes.

Age at stress, EE, assay

# Frequency table: age at stress vs age at enrichment vs age at assay.
freq_ages2 <- as.data.frame(table(dat$Age_stress_exposure, dat$Age_EE_exposure, dat$Age_assay)) %>% rename(Age_stress = Var1, Age_EE = Var2, Age_assay = Var3)

is_alluvia_form(as.data.frame(freq_ages2), axes = 1:3, silent = TRUE)

# Three-axis alluvial diagram of life stages across the experiment.
ggplot(data = freq_ages2,
  aes(axis1 = Age_stress, axis2 = Age_EE, axis3 = Age_assay, y = Freq)) +
  geom_alluvium(aes(fill = Age_stress)) +
  geom_flow() +
  geom_stratum(aes(fill = Age_stress)) +
  geom_text(stat = "stratum", aes(label = after_stat(stratum))) +
  #theme_minimal() +
  theme_void() +
  theme(legend.position = "none",
        plot.title = element_text(hjust = 0.5, vjust = 3),
        axis.title.x = element_text(),
        axis.text.x = element_text(face="bold")) +
  scale_x_discrete(limits = c("Stress", "Enrichment", "Assay"), expand = c(0.15, 0.05), position = "top") +
  ggtitle("Life stages used (set2)")
## Warning in to_lodes_form(data = data, axes = axis_ind, discern =
## params$discern): Some strata appear at multiple axes.

## Warning in to_lodes_form(data = data, axes = axis_ind, discern =
## params$discern): Some strata appear at multiple axes.

## Warning in to_lodes_form(data = data, axes = axis_ind, discern =
## params$discern): Some strata appear at multiple axes.

## Warning in to_lodes_form(data = data, axes = axis_ind, discern =
## params$discern): Some strata appear at multiple axes.

freq_ages2 %>% filter(Freq != 0) %>% arrange(desc(Freq)) # collapsed table of counts, zero rows removed

Age at stress, EE, assay, exposure order

# Frequency table: ages at stress, enrichment, assay, plus exposure order.
freq_ages3 <- as.data.frame(table(dat$Age_stress_exposure, dat$Age_EE_exposure, dat$Age_assay, dat$Stress_vs_EE_timing)) %>% rename(Age_stress = Var1, Age_EE = Var2, Age_assay = Var3, Order = Var4)
is_alluvia_form(as.data.frame(freq_ages3), axes = 1:4, silent = TRUE)

# Four-axis alluvial diagram: life stages and stress-vs-enrichment order.
ggplot(data = freq_ages3,
  aes(axis1 = Age_stress, axis2 = Age_EE, axis3 = Age_assay, axis4 = Order,  y = Freq)) +
  geom_alluvium(aes(fill = Age_stress)) +
  geom_flow() +
  geom_stratum(aes(fill = Age_stress)) +
  geom_text(stat = "stratum", aes(label = after_stat(stratum))) +
  #theme_minimal() +
  theme_void() +
  theme(legend.position = "none",
        plot.title = element_text(hjust = 0.5, vjust = 3),
        axis.title.x = element_text(),
        axis.text.x = element_text(face="bold")) +
  scale_x_discrete(limits = c("Stress", "Enrichment", "Assay", "Order"), expand = c(0.15, 0.05), position = "top") +
  ggtitle("Life stages used and order (set3)")
## Warning in to_lodes_form(data = data, axes = axis_ind, discern =
## params$discern): Some strata appear at multiple axes.

## Warning in to_lodes_form(data = data, axes = axis_ind, discern =
## params$discern): Some strata appear at multiple axes.

## Warning in to_lodes_form(data = data, axes = axis_ind, discern =
## params$discern): Some strata appear at multiple axes.

## Warning in to_lodes_form(data = data, axes = axis_ind, discern =
## params$discern): Some strata appear at multiple axes.

freq_ages2 %>% filter(Freq != 0) %>% arrange(desc(Freq)) # collapsed table of counts, zero rows removed
freq_ages3 %>% filter(Freq != 0) %>% arrange(desc(Freq)) # collapsed table of counts, zero rows removed

Stress duration and type of stress

# Frequency table: stress duration vs type of stress exposure.
freq_stress1 <- as.data.frame(table(dat$Stress_duration, dat$Type_stress_exposure)) %>% rename(Stress_duration = Var1,  Stress_type = Var2)
is_alluvia_form(as.data.frame(freq_stress1), axes = 1:2, silent = TRUE)

# Alluvial diagram: how stress durations map onto stressor types.
ggplot(data = freq_stress1,
  aes(axis1 = Stress_duration, axis2 = Stress_type, y = Freq)) +
  geom_alluvium(aes(fill = Stress_duration)) +
  geom_stratum(aes(fill = Stress_duration)) +
  geom_text(stat = "stratum", aes(label = after_stat(stratum))) +
#theme_minimal() +
  theme_void() +
  theme(legend.position = "none",
        plot.title = element_text(hjust = 0.5, vjust = 3),
        axis.title.x = element_text(),
        axis.text.x = element_text(face="bold")) +
  scale_x_discrete(limits = c("Duration", "Type"), expand = c(0.15, 0.05), position = "top") +
  ggtitle("Stress exposure  variables (set1)")

Age at stress, type of stress, stress duration

# Frequency table: age at stress vs stress duration vs stressor type.
freq_stress2 <- as.data.frame(table(dat$Age_stress_exposure, dat$Stress_duration, dat$Type_stress_exposure)) %>% rename(Age_stress = Var1, Duration_stress = Var2, Type_stress = Var3)
is_alluvia_form(as.data.frame(freq_stress2), axes = 1:3, silent = TRUE)

# Three-axis alluvial diagram of stress exposure variables.
ggplot(data = freq_stress2,
  aes(axis1 = Age_stress, axis2 = Duration_stress, axis3 = Type_stress, y = Freq)) +
  geom_alluvium(aes(fill = Age_stress)) +
  geom_flow() +
  geom_stratum(aes(fill = Age_stress)) +
  geom_text(stat = "stratum", aes(label = after_stat(stratum))) +
  #theme_minimal() +
  theme_void() +
  theme(legend.position = "none",
        plot.title = element_text(hjust = 0.5, vjust = 3),
        axis.title.x = element_text(),
        axis.text.x = element_text(face="bold")) +
  scale_x_discrete(limits = c("Age", "Duration", "Type"), expand = c(0.1, 0.1), position = "top") +
  ggtitle("Stress exposure variables (set2)")

### Exercise, Social EE

# Frequency table: exercise component vs social component of enrichment.
freq_EE1 <- as.data.frame(table(dat$EE_exercise, dat$EE_social)) %>% rename(EE_exercise = Var1, EE_social = Var2)
# BUG FIX: the alluvial-form check previously tested freq_stress1 (a
# copy-paste leftover) instead of the freq_EE1 table actually plotted below.
is_alluvia_form(as.data.frame(freq_EE1), axes = 1:2, silent = TRUE)

# Alluvial diagram: exercise vs social enrichment combinations.
ggplot(data = freq_EE1,
  aes(axis1 = EE_exercise, axis2 = EE_social, y = Freq)) +
  geom_alluvium(aes(fill = EE_exercise)) +
  geom_stratum(aes(fill = EE_exercise)) +
  geom_text(stat = "stratum", aes(label = after_stat(stratum))) +
#theme_minimal() +
  theme_void() +
  theme(legend.position = "none",
        plot.title = element_text(hjust = 0.5, vjust = 3),
        axis.title.x = element_text(),
        axis.text.x = element_text(face="bold")) +
  scale_x_discrete(limits = c("Exercise", "Social"), expand = c(0.15, 0.05), position = "top") +
  ggtitle("EE exposure  variables (set1)")

Age EE, exercise, social

# Frequency table: age at enrichment vs exercise vs social components.
freq_EE2 <- as.data.frame(table(dat$Age_EE_exposure, dat$EE_exercise, dat$EE_social)) %>% rename(Age_EE = Var1, EE_exercise = Var2, EE_social = Var3)
is_alluvia_form(as.data.frame(freq_EE2), axes = 1:3, silent = TRUE)

# Three-axis alluvial diagram of enrichment exposure variables.
ggplot(data = freq_EE2,
  aes(axis1 = Age_EE, axis2 = EE_exercise, axis3 = EE_social, y = Freq)) +
  geom_alluvium(aes(fill = Age_EE)) +
  geom_flow() +
  geom_stratum(aes(fill = Age_EE)) +
  geom_text(stat = "stratum", aes(label = after_stat(stratum))) +
  #theme_minimal() +
  theme_void() +
  theme(legend.position = "none",
        plot.title = element_text(hjust = 0.5, vjust = 3),
        axis.title.x = element_text(),
        axis.text.x = element_text(face="bold")) +
  scale_x_discrete(limits = c("Age", "Exercise", "Social"), expand = c(0.1, 0.1), position = "top") +
  ggtitle("EE exposure variables (set2)")

Learning vs memory, type of reinforcement

# Frequency table: learning-vs-memory vs type of reinforcement.
freq_assay1 <- as.data.frame(table(dat$Learning_vs_memory, dat$Appetitive_vs_aversive)) %>% rename(Learning_Memory = Var1, Reinforcement = Var2)
is_alluvia_form(as.data.frame(freq_assay1), axes = 1:2, silent = TRUE)

# Alluvial diagram of behavioural assay variables.
ggplot(data = freq_assay1,
  aes(axis1 = Learning_Memory, axis2 = Reinforcement, y = Freq)) +
  geom_alluvium(aes(fill = Learning_Memory)) +
  geom_stratum(aes(fill = Learning_Memory)) +
  geom_text(stat = "stratum", aes(label = after_stat(stratum))) +
#theme_minimal() +
  theme_void() +
  theme(legend.position = "none",
        plot.title = element_text(hjust = 0.5, vjust = 3),
        axis.title.x = element_text(),
        axis.text.x = element_text(face="bold")) +
  scale_x_discrete(limits = c("Learning_Memory", "Reinforcement"), expand = c(0.15, 0.05), position = "top") +
  ggtitle("Behavioural assay  variables (set1)")

Age at assay, learning vs memory, type of reinforcement

# Frequency table: age at assay vs learning-vs-memory vs reinforcement.
freq_assay2 <- as.data.frame(table(dat$Age_assay, dat$Learning_vs_memory, dat$Appetitive_vs_aversive)) %>% rename(Age_assay = Var1, Learning_Memory = Var2, Reinforcement = Var3)
is_alluvia_form(as.data.frame(freq_assay2), axes = 1:3, silent = TRUE)

# Three-axis alluvial diagram of behavioural assay variables.
ggplot(data = freq_assay2,
  aes(axis1 = Age_assay, axis2 = Learning_Memory, axis3 = Reinforcement, y = Freq)) +
  geom_alluvium(aes(fill = Age_assay)) +
  geom_flow() +
  geom_stratum(aes(fill = Age_assay)) +
  geom_text(stat = "stratum", aes(label = after_stat(stratum))) +
  #theme_minimal() +
  theme_void() +
  theme(legend.position = "none",
        plot.title = element_text(hjust = 0.5, vjust = 3),
        axis.title.x = element_text(),
        axis.text.x = element_text(face="bold")) +
  scale_x_discrete(limits = c("Age", "Learning_Memory", "Reinforcement"), expand = c(0.1, 0.1), position = "top") +
  ggtitle("Behavioural assay  variables (set2)")

Type of learning, learning vs memory, type of reinforcement

# Frequency table: type of learning vs learning-vs-memory vs reinforcement.
freq_assay3 <- as.data.frame(table(dat$Type_learning, dat$Learning_vs_memory, dat$Appetitive_vs_aversive)) %>% rename(Type = Var1, Learning_Memory = Var2, Reinforcement = Var3)
is_alluvia_form(as.data.frame(freq_assay3), axes = 1:3, silent = TRUE)

# Three-axis alluvial diagram of behavioural assay variables.
ggplot(data = freq_assay3,
  aes(axis1 = Type, axis2 = Learning_Memory, axis3 = Reinforcement, y = Freq)) +
  geom_alluvium(aes(fill = Type)) +
  geom_flow() +
  geom_stratum(aes(fill = Type)) +
  geom_text(stat = "stratum", aes(label = after_stat(stratum))) +
  #theme_minimal() +
  theme_void() +
  theme(legend.position = "none",
        plot.title = element_text(hjust = 0.5, vjust = 3),
        axis.title.x = element_text(),
        axis.text.x = element_text(face="bold")) +
  scale_x_discrete(limits = c("Type", "Learning_Memory", "Reinforcement"), expand = c(0.1, 0.1), position = "top") +
  ggtitle("Behavioural assay  variables (set3)")

Modelling with lnRR

Environmental enrichment

Intercept model

Learning and memory are significantly improved when animals are housed with environmental enrichment

# Intercept-only multilevel meta-analysis of the enrichment effect (lnRR),
# with random intercepts for study, effect size and strain, and the
# within-study sampling VCV (r = 0.5). t-tests for coefficients.
mod_E0 <- rma.mv(yi = lnRR_Ea, V = VCV_E, random = list(~1|Study_ID,
                                                          ~1|ES_ID,
                                                          ~1|Strain),
                 test = "t",
                 data = dat)
summary(mod_E0) 
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -20.3125   40.6249   48.6249   58.6683   49.0900   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0044  0.0665     30     no  Study_ID 
## sigma^2.2  0.0485  0.2203     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Heterogeneity:
## Q(df = 91) = 897.6814, p-val < .0001
## 
## Model Results:
## 
## estimate      se    tval  df    pval   ci.lb   ci.ub 
##   0.2146  0.0339  6.3344  91  <.0001  0.1473  0.2818  *** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Heterogeneity (multilevel I2) decomposition (orchaRd).
i2_ml(mod_E0) 
##     I2_total  I2_Study_ID     I2_ES_ID    I2_Strain 
## 9.398214e-01 7.853525e-02 8.612861e-01 7.246533e-10
orchard_plot(mod_E0, mod = "Int", xlab = "lnRR", alpha=0.4) +  # Orchard plot 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5)+ # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2)+ # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_colour_manual(values = "darkorange")+ # change colours
  scale_fill_manual(values="darkorange")+ 
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 24), # change font sizes
        legend.title = element_text(size = 15),
        legend.text = element_text(size = 13)) 

Meta-regression

Type of learning

The type of learning/memory response

# Meta-regression: moderation of the enrichment effect by type of learning.
# Keep only rows with a usable Type_learning label, then rebuild the
# sampling VCV for this subset.
dat1 <- filter(dat, Type_learning %in% c("Recognition", "Habituation", "Conditioning"))
# BUG FIX: cluster must come from the filtered data (dat1), not the full
# dat — using dat$Study_ID misaligns vi and cluster whenever the filter
# drops rows. (Output below shows k = 92, i.e. nothing was dropped here,
# so the bug was latent, but it would silently corrupt the VCV otherwise.)
VCV_E1 <- impute_covariance_matrix(vi = dat1$lnRRV_E, cluster = dat1$Study_ID, r = 0.5)

# No-intercept coding (~Type_learning-1) gives one mean estimate per level.
mod_E1 <- rma.mv(yi = lnRR_Ea, V = VCV_E1, mod = ~Type_learning-1, random = list(~1|Study_ID,
                                                                                  ~1|ES_ID,
                                                                                  ~1|Strain),
                 test = "t",
                 data = dat1)

summary(mod_E1)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -18.0617   36.1234   48.1234   63.0553   49.1478   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0081  0.0898     30     no  Study_ID 
## sigma^2.2  0.0434  0.2083     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 89) = 790.2293, p-val < .0001
## 
## Test of Moderators (coefficients 1:3):
## F(df1 = 3, df2 = 89) = 14.6747, p-val < .0001
## 
## Model Results:
## 
##                            estimate      se    tval  df    pval    ci.lb 
## Type_learningConditioning    0.2514  0.0383  6.5700  89  <.0001   0.1754 
## Type_learningHabituation     0.2267  0.1216  1.8639  89  0.0656  -0.0150 
## Type_learningRecognition     0.0635  0.0706  0.8997  89  0.3707  -0.0768 
##                             ci.ub 
## Type_learningConditioning  0.3275  *** 
## Type_learningHabituation   0.4684    . 
## Type_learningRecognition   0.2039      
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Marginal/conditional R2 for the moderator (orchaRd).
r2_ml(mod_E1) 
##   R2_marginal R2_coditional 
##    0.08104773    1.00000000
# Orchard plot by learning type; stored for later assembly with patchwork/cowplot.
Learning_E <- orchard_plot(mod_E1, mod = "Type_learning", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  xlim(-0.5, 2) + 
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Learning vs Memory

Is the assay broadly measuring learning or memory?

# Meta-regression: does the assay measure learning or memory?
# NOTE(review): k = 85 in the output vs 92 rows in dat — rma.mv appears to
# drop rows with a missing moderator; confirm VCV_E rows align after dropping.
mod_E2 <-  rma.mv(yi = lnRR_Ea, V = VCV_E, mod = ~Learning_vs_memory-1, random = list(~1|Study_ID,
                                                                                        ~1|ES_ID,
                                                                                        ~1|Strain),
                  test = "t",
                  data = dat)

summary(mod_E2) 
## 
## Multivariate Meta-Analysis Model (k = 85; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -11.6265   23.2529   33.2529   45.3471   34.0322   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0077  0.0877     30     no  Study_ID 
## sigma^2.2  0.0310  0.1760     85     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 83) = 652.9299, p-val < .0001
## 
## Test of Moderators (coefficients 1:2):
## F(df1 = 2, df2 = 83) = 21.0340, p-val < .0001
## 
## Model Results:
## 
##                             estimate      se    tval  df    pval   ci.lb 
## Learning_vs_memoryLearning    0.2458  0.0475  5.1789  83  <.0001  0.1514 
## Learning_vs_memoryMemory      0.1884  0.0360  5.2368  83  <.0001  0.1169 
##                              ci.ub 
## Learning_vs_memoryLearning  0.3402  *** 
## Learning_vs_memoryMemory    0.2600  *** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Marginal/conditional R2 for the moderator (orchaRd).
r2_ml(mod_E2) 
##   R2_marginal R2_coditional 
##    0.01866131    1.00000000
# Orchard plot by learning vs memory; stored for later figure assembly.
LvsM_E<- orchard_plot(mod_E2, mod = "Learning_vs_memory", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  xlim(-0.5, 2) + 
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Type of reinforcement

The type of cue used

# Meta-regression: moderation by type of reinforcement (appetitive/aversive).
# Subset to usable labels and rebuild the sampling VCV for the subset —
# note this chunk correctly clusters on dat2$Study_ID.
dat2 <- filter(dat, Appetitive_vs_aversive %in% c("Appetitive", "Aversive", "Not applicable"))
VCV_E2 <- impute_covariance_matrix(vi = dat2$lnRRV_E, cluster = dat2$Study_ID, r = 0.5)
# No-intercept coding gives one mean estimate per reinforcement level.
mod_E3 <- rma.mv(yi = lnRR_Ea, V = VCV_E2, mod = ~ Appetitive_vs_aversive-1, random = list(~1|Study_ID,
                                                                                            ~1|ES_ID,
                                                                                            ~1|Strain),
                 test = "t",
                 data = dat2)

summary(mod_E3)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -18.1008   36.2017   48.2017   63.1335   49.2261   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0061  0.0778     30     no  Study_ID 
## sigma^2.2  0.0449  0.2119     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 89) = 780.4926, p-val < .0001
## 
## Test of Moderators (coefficients 1:3):
## F(df1 = 3, df2 = 89) = 15.1398, p-val < .0001
## 
## Model Results:
## 
##                                       estimate      se    tval  df    pval 
## Appetitive_vs_aversiveAppetitive        0.1949  0.0721  2.7023  89  0.0082 
## Appetitive_vs_aversiveAversive          0.2704  0.0439  6.1559  89  <.0001 
## Appetitive_vs_aversiveNot applicable    0.1082  0.0600  1.8030  89  0.0748 
##                                         ci.lb   ci.ub 
## Appetitive_vs_aversiveAppetitive       0.0516  0.3382   ** 
## Appetitive_vs_aversiveAversive         0.1831  0.3577  *** 
## Appetitive_vs_aversiveNot applicable  -0.0110  0.2274    . 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Marginal/conditional R2 for the moderator (orchaRd).
r2_ml(mod_E3) 
##   R2_marginal R2_coditional 
##    0.08421309    1.00000000
# Orchard plot by reinforcement type; stored for later figure assembly.
Reinforcement_E <-orchard_plot(mod_E3, mod = "Appetitive_vs_aversive", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  xlim(-0.5, 2) + 
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Social enrichment

Does EE also include a manipulation of social environment? Note that we excluded any studies that exclusively used social enrichment.

# Subset to effect sizes where EE was classed as social vs non-social
dat5 <- filter(dat, EE_social %in% c("Social", "Non-social"))

# Sampling (co)variance matrix. The cluster variable must come from the SAME
# filtered data as vi: the original used dat$Study_ID, whose length/order no
# longer matches dat5$lnRRV_E after filtering, misaligning the VCV blocks.
VCV_E5 <- impute_covariance_matrix(vi = dat5$lnRRV_E, cluster = dat5$Study_ID, r = 0.5)

# Meta-regression: social vs non-social enrichment (cell-means coding)
mod_E4 <- rma.mv(yi = lnRR_Ea, V = VCV_E5, mod = ~ EE_social - 1,
                 random = list(~1 | Study_ID,
                               ~1 | ES_ID,
                               ~1 | Strain),
                 test = "t", data = dat5)

summary(mod_E4)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -20.1433   40.2867   50.2867   62.7857   51.0009   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0061  0.0781     30     no  Study_ID 
## sigma^2.2  0.0480  0.2190     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 90) = 834.2589, p-val < .0001
## 
## Test of Moderators (coefficients 1:2):
## F(df1 = 2, df2 = 90) = 19.5680, p-val < .0001
## 
## Model Results:
## 
##                      estimate      se    tval  df    pval   ci.lb   ci.ub 
## EE_socialNon-social    0.1791  0.0548  3.2704  90  0.0015  0.0703  0.2879   ** 
## EE_socialSocial        0.2399  0.0450  5.3330  90  <.0001  0.1506  0.3293  *** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_E4) 
##   R2_marginal R2_coditional 
##    0.01636226    1.00000000
# Orchard plot of mod_E4 (social vs non-social enrichment), with PI/CI
# error bars and cosmetic theme tweaks
Social_E <-orchard_plot(mod_E4, mod = "EE_social", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  xlim(-0.5, 2) + 
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
       text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Exercise enrichment

Does the form of enrichment include exercise through a running wheel or treadmill?

# Meta-regression: does the enrichment protocol include exercise?
# Uses the full data set and the overall VCV matrix (VCV_E).
mod_E5 <- rma.mv(yi = lnRR_Ea,
                 V = VCV_E,
                 mod = ~ EE_exercise - 1,
                 random = list(~1 | Study_ID, ~1 | ES_ID, ~1 | Strain),
                 test = "t",
                 data = dat)

summary(mod_E5)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -20.4952   40.9905   50.9905   63.4895   51.7048   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0054  0.0734     30     no  Study_ID 
## sigma^2.2  0.0487  0.2208     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 90) = 867.0038, p-val < .0001
## 
## Test of Moderators (coefficients 1:2):
## F(df1 = 2, df2 = 90) = 19.4577, p-val < .0001
## 
## Model Results:
## 
##                         estimate      se    tval  df    pval   ci.lb   ci.ub 
## EE_exerciseExercise       0.2156  0.0421  5.1141  90  <.0001  0.1318  0.2993 
## EE_exerciseNo exercise    0.2146  0.0601  3.5723  90  0.0006  0.0952  0.3339 
##  
## EE_exerciseExercise     *** 
## EE_exerciseNo exercise  *** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_E5) 
##   R2_marginal R2_coditional 
##  4.108541e-06  1.000000e+00
Exercise_E <-orchard_plot(mod_E5, mod = "EE_exercise", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  xlim(-0.5, 2) + 
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Age of enrichment

The age at which the individuals were exposed to environmental enrichment.

# Subset to effect sizes with adult or adolescent enrichment exposure
dat6 <- filter(dat, Age_EE_exposure %in% c("Adult", "Adolescent"))

# Sampling (co)variance matrix built from the same filtered subset
VCV_E6 <- impute_covariance_matrix(vi = dat6$lnRRV_E,
                                   cluster = dat6$Study_ID,
                                   r = 0.5)

# Meta-regression: age at enrichment exposure (cell-means coding)
mod_E6 <- rma.mv(yi = lnRR_Ea,
                 V = VCV_E6,
                 mod = ~ Age_EE_exposure - 1,
                 random = list(~1 | Study_ID, ~1 | ES_ID, ~1 | Strain),
                 test = "t",
                 data = dat6)

summary(mod_E6)
## 
## Multivariate Meta-Analysis Model (k = 88; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -17.2225   34.4450   44.4450   56.7168   45.1950   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0047  0.0682     29     no  Study_ID 
## sigma^2.2  0.0457  0.2137     88     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 86) = 834.1449, p-val < .0001
## 
## Test of Moderators (coefficients 1:2):
## F(df1 = 2, df2 = 86) = 21.8297, p-val < .0001
## 
## Model Results:
## 
##                            estimate      se    tval  df    pval   ci.lb   ci.ub 
## Age_EE_exposureAdolescent    0.2115  0.0389  5.4335  86  <.0001  0.1341  0.2889 
## Age_EE_exposureAdult         0.2572  0.0684  3.7599  86  0.0003  0.1212  0.3932 
##  
## Age_EE_exposureAdolescent  *** 
## Age_EE_exposureAdult       *** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_E6) 
##   R2_marginal R2_coditional 
##   0.006503496   0.999999999
# Orchard plot of mod_E6 (age at enrichment exposure), with PI/CI error bars
# and cosmetic theme tweaks
Age_E<- orchard_plot(mod_E6, mod = "Age_EE_exposure", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  xlim(-0.5, 2) + 
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10))  

Full model

# filter data so that all K < 5 are removed
dat_Efm <- dat %>%
  filter(Type_learning %in% c("Recognition", "Habituation", "Conditioning"),
         Appetitive_vs_aversive %in% c("Appetitive", "Aversive", "Not applicable"),
         EE_social %in% c("Social", "Non-social"),
         Age_EE_exposure %in% c("Adult", "Adolescent"))

# Cluster must come from the filtered data: the original used dat$Study_ID,
# whose length no longer matches dat_Efm$lnRRV_E, misaligning the VCV blocks.
VCV_Efm <- impute_covariance_matrix(vi = dat_Efm$lnRRV_E, cluster = dat_Efm$Study_ID, r = 0.5)

# Full multi-moderator model for the ENRICHMENT effect sizes.
# NOTE: the original fit yi = lnRR_Sa (the stress effect sizes) here, which
# appears to be a copy-paste slip from the stress section; this is the
# enrichment full model, so lnRR_Ea is used.
mod_Efm <- rma.mv(yi = lnRR_Ea, V = VCV_Efm,
                  mod = ~ Type_learning - 1 + Learning_vs_memory - 1 +
                    Appetitive_vs_aversive - 1 + EE_social - 1 +
                    EE_exercise - 1 + Age_EE_exposure - 1,
                  random = list(~1 | Study_ID,
                                ~1 | ES_ID,
                                ~1 | Strain),
                  test = "t",
                  data = dat_Efm)
summary(mod_Efm)
## 
## Multivariate Meta-Analysis Model (k = 83; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
##  -9.9997   19.9993   41.9993   67.4917   46.1898   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0327  0.1807     29     no  Study_ID 
## sigma^2.2  0.0283  0.1683     83     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 75) = 733.4585, p-val < .0001
## 
## Test of Moderators (coefficients 1:8):
## F(df1 = 8, df2 = 75) = 1.4677, p-val = 0.1834
## 
## Model Results:
## 
##                                       estimate      se     tval  df    pval 
## Type_learningConditioning              -0.2152  0.2053  -1.0485  75  0.2978 
## Type_learningRecognition               -0.3321  0.2663  -1.2470  75  0.2163 
## Learning_vs_memoryMemory               -0.0693  0.0543  -1.2763  75  0.2058 
## Appetitive_vs_aversiveAversive          0.1935  0.1526   1.2678  75  0.2088 
## Appetitive_vs_aversiveNot applicable    0.3664  0.2266   1.6173  75  0.1100 
## EE_socialSocial                         0.0615  0.1122   0.5482  75  0.5852 
## EE_exerciseNo exercise                 -0.0058  0.1065  -0.0544  75  0.9568 
## Age_EE_exposureAdult                   -0.1037  0.1179  -0.8799  75  0.3817 
##                                         ci.lb   ci.ub 
## Type_learningConditioning             -0.6242  0.1937    
## Type_learningRecognition              -0.8627  0.1985    
## Learning_vs_memoryMemory              -0.1775  0.0389    
## Appetitive_vs_aversiveAversive        -0.1105  0.4975    
## Appetitive_vs_aversiveNot applicable  -0.0849  0.8178    
## EE_socialSocial                       -0.1620  0.2850    
## EE_exerciseNo exercise                -0.2180  0.2064    
## Age_EE_exposureAdult                  -0.3386  0.1311    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_Efm) 
##   R2_marginal R2_coditional 
##      0.166526      1.000000
res_Efm <- dredge(mod_Efm, trace=2)
## 
  |                                                                            
  |                                                                      |   0%
  |                                                                            
  |=                                                                     |   2%
  |                                                                            
  |==                                                                    |   3%
  |                                                                            
  |===                                                                   |   5%
  |                                                                            
  |====                                                                  |   6%
  |                                                                            
  |=====                                                                 |   8%
  |                                                                            
  |=======                                                               |   9%
  |                                                                            
  |========                                                              |  11%
  |                                                                            
  |=========                                                             |  12%
  |                                                                            
  |==========                                                            |  14%
  |                                                                            
  |===========                                                           |  16%
  |                                                                            
  |============                                                          |  17%
  |                                                                            
  |=============                                                         |  19%
  |                                                                            
  |==============                                                        |  20%
  |                                                                            
  |===============                                                       |  22%
  |                                                                            
  |================                                                      |  23%
  |                                                                            
  |==================                                                    |  25%
  |                                                                            
  |===================                                                   |  27%
  |                                                                            
  |====================                                                  |  28%
  |                                                                            
  |=====================                                                 |  30%
  |                                                                            
  |======================                                                |  31%
  |                                                                            
  |=======================                                               |  33%
  |                                                                            
  |========================                                              |  34%
  |                                                                            
  |=========================                                             |  36%
  |                                                                            
  |==========================                                            |  38%
  |                                                                            
  |===========================                                           |  39%
  |                                                                            
  |============================                                          |  41%
  |                                                                            
  |==============================                                        |  42%
  |                                                                            
  |===============================                                       |  44%
  |                                                                            
  |================================                                      |  45%
  |                                                                            
  |=================================                                     |  47%
  |                                                                            
  |==================================                                    |  48%
  |                                                                            
  |===================================                                   |  50%
  |                                                                            
  |====================================                                  |  52%
  |                                                                            
  |=====================================                                 |  53%
  |                                                                            
  |======================================                                |  55%
  |                                                                            
  |=======================================                               |  56%
  |                                                                            
  |========================================                              |  58%
  |                                                                            
  |==========================================                            |  59%
  |                                                                            
  |===========================================                           |  61%
  |                                                                            
  |============================================                          |  62%
  |                                                                            
  |=============================================                         |  64%
  |                                                                            
  |==============================================                        |  66%
  |                                                                            
  |===============================================                       |  67%
  |                                                                            
  |================================================                      |  69%
  |                                                                            
  |=================================================                     |  70%
  |                                                                            
  |==================================================                    |  72%
  |                                                                            
  |===================================================                   |  73%
  |                                                                            
  |====================================================                  |  75%
  |                                                                            
  |======================================================                |  77%
  |                                                                            
  |=======================================================               |  78%
  |                                                                            
  |========================================================              |  80%
  |                                                                            
  |=========================================================             |  81%
  |                                                                            
  |==========================================================            |  83%
  |                                                                            
  |===========================================================           |  84%
  |                                                                            
  |============================================================          |  86%
  |                                                                            
  |=============================================================         |  88%
  |                                                                            
  |==============================================================        |  89%
  |                                                                            
  |===============================================================       |  91%
  |                                                                            
  |=================================================================     |  92%
  |                                                                            
  |==================================================================    |  94%
  |                                                                            
  |===================================================================   |  95%
  |                                                                            
  |====================================================================  |  97%
  |                                                                            
  |===================================================================== |  98%
# Keep only models within 6 AICc units of the best model
res_Efm2 <- subset(res_Efm, delta <= 6, recalc.weights = FALSE)
# Per-moderator sum of Akaike weights across the retained model set.
# MuMIn::importance() is defunct in current MuMIn; sw() is its replacement.
sw(res_Efm2)
##                      Learning_vs_memory Age_EE_exposure EE_exercise
## Sum of weights:      0.94               0.52            0.24       
## N containing models:   20                 10               8       
##                      Appetitive_vs_aversive EE_social Type_learning
## Sum of weights:      0.22                   0.21      0.14         
## N containing models:    7                      7         6

Publication bias & sensitivity analysis

# Funnel plot of the full enrichment model for visual publication-bias check
Funnel_E<-funnel(mod_Efm, xlab = "lnRR", ylab = "Standard Error")

#year published was scaled previously  under stress PB

# Egger-style predictor: sqrt of the inverse effective sample size across
# the four treatment cells
dat_Efm$sqrt_inv_e_n <- with(dat_Efm, sqrt(1/CC_n + 1/EC_n + 1/ES_n + 1/CS_n))

# Multi-moderator publication-bias regression.
# NOTE(review): this chunk sits in the enrichment (E) section but models the
# STRESS effect sizes (lnRR_Sa / lnRRV_S) and uses Age_stress_exposure rather
# than Age_EE_exposure. The printed output below confirms this is what ran --
# it looks copy-pasted from the stress PB section; confirm whether
# lnRR_Ea / lnRRV_E / Age_EE_exposure were intended here.
PB_MR_E<- rma.mv(lnRR_Sa, lnRRV_S, mods = ~1 + sqrt_inv_e_n +  Year_published + Type_learning + Appetitive_vs_aversive + EE_social + EE_exercise + Age_stress_exposure, random = list(~1|Study_ID,
                                                          ~1|ES_ID,
                                                          ~1|Strain), method = "REML", test = "t", 
    data = dat_Efm)

# Extract fixed-effect estimates with 95% CIs (helper from functions file)
estimates_PB_MR_E<- estimates.CI(PB_MR_E)
estimates_PB_MR_E
estimate mean lower upper
intrcpt -13.9147389 -59.3987797 31.5693018
sqrt_inv_e_n -0.1711961 -1.0441687 0.7017764
Year_published 0.0068520 -0.0157749 0.0294790
Type_learningHabituation -0.5828922 -1.0088024 -0.1569819
Type_learningRecognition -0.1068229 -0.4651361 0.2514902
Appetitive_vs_aversiveAversive 0.2077553 -0.1500255 0.5655360
Appetitive_vs_aversiveNot applicable 0.3274613 -0.1646052 0.8195278
EE_socialSocial 0.0540916 -0.1861944 0.2943777
EE_exerciseNo exercise -0.0193952 -0.2891022 0.2503118
Age_stress_exposureAdult -0.1438520 -0.5799393 0.2922354
Age_stress_exposureEarly postnatal -0.0499447 -0.4906291 0.3907397
Age_stress_exposurePrenatal -0.1335549 -0.5816002 0.3144903
#no effect of inv_effective sample size or year published

#leave-one-out analysis
# Refit the overall enrichment model once per study, each time excluding all
# effect sizes belonging to that study.
dat$Study_ID <- as.factor(dat$Study_ID)

study_ids <- levels(dat$Study_ID)
# Preallocate instead of growing the list; seq_along() is safe for empty input
# (1:length(x) would yield c(1, 0) when x is empty).
LeaveOneOut_effectsize <- vector("list", length(study_ids))
for (i in seq_along(study_ids)) {
  LeaveOneOut_effectsize[[i]] <- rma.mv(yi = lnRR_Ea, V = lnRRV_E,
                                        random = list(~1 | Study_ID, ~1 | ES_ID, ~1 | Strain),
                                        method = "REML",
                                        data = dat[dat$Study_ID != study_ids[i], ])
}


# Extract the pooled estimate and its 95% CI bounds from a fitted model
# (returns a one-row data frame with columns est, lower, upper)
est.func <- function(mod_E0){
  data.frame(est = mod_E0$b, lower = mod_E0$ci.lb, upper = mod_E0$ci.ub)
}


#using dplyr to form data frame
# Combine the per-study leave-one-out estimates; left_out labels which study
# was excluded from each refit (same order as levels(dat$Study_ID)).
MA_CVR <- lapply(LeaveOneOut_effectsize, function(x) est.func(x)) %>%
  bind_rows() %>%
  mutate(left_out = levels(dat$Study_ID))

#telling ggplot to stop reordering factors
MA_CVR$left_out <- factor(MA_CVR$left_out, levels = MA_CVR$left_out)

# Plot each leave-one-out estimate against the overall model (mod_E0, fitted
# earlier). theme_bw() must come BEFORE the theme() tweaks: a complete theme
# replaces prior theme() modifications, so the original ordering silently
# discarded the grid-line settings applied before it.
leaveoneout_E <- ggplot(MA_CVR) +
  geom_hline(yintercept = 0, lty = 2, lwd = 1) +
  geom_hline(yintercept = mod_E0$ci.lb, lty = 3, lwd = 0.75, colour = "black") +
  geom_hline(yintercept = mod_E0$b, lty = 1, lwd = 0.75, colour = "black") +
  geom_hline(yintercept = mod_E0$ci.ub, lty = 3, lwd = 0.75, colour = "black") +
  geom_pointrange(aes(x = left_out, y = est, ymin = lower, ymax = upper)) +
  xlab("Study left out") + 
  ylab("lnRR, 95% CI") + 
  coord_flip() +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.text.y = element_text(size = 6))

leaveoneout_E

# Restore Study_ID to integers via as.character(): as.integer() on a factor
# returns the level codes, not the original ID values.
dat$Study_ID <- as.integer(as.character(dat$Study_ID))

Stress

Intercept model

Learning and memory are significantly reduced due to stress. High heterogeneity

# Overall (intercept-only) model for the stress effect sizes, with study,
# effect-size, and strain random effects
mod_S0 <- rma.mv(yi = lnRR_Sa,
                 V = VCV_S,
                 random = list(~1 | Study_ID, ~1 | ES_ID, ~1 | Strain),
                 test = "t",
                 data = dat)

summary(mod_S0)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -22.6902   45.3804   53.3804   63.4239   53.8456   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0107  0.1036     30     no  Study_ID 
## sigma^2.2  0.0504  0.2245     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Heterogeneity:
## Q(df = 91) = 933.9737, p-val < .0001
## 
## Model Results:
## 
## estimate      se     tval  df    pval    ci.lb    ci.ub 
##  -0.1155  0.0378  -3.0591  91  0.0029  -0.1906  -0.0405  ** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
i2_ml(mod_S0) 
##     I2_total  I2_Study_ID     I2_ES_ID    I2_Strain 
## 9.474395e-01 1.662608e-01 7.811787e-01 2.947034e-10
# Orchard plot of the overall stress model (intercept only), with PI/CI
# error bars and cosmetic theme tweaks
orchard_plot(mod_S0, mod = "Int", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 24), # change font sizes
        legend.title = element_text(size = 15),
        legend.text = element_text(size = 13)) 

Meta-regression

Type of learning

The type of learning/memory response

dat$Type_learning <- as.factor(dat$Type_learning)

# Sampling (co)variance matrix. Cluster must come from the SAME data as vi:
# the original used dat$Study_ID with vi = dat1$lnRRV_S, which misaligns the
# clusters with the effect sizes whenever dat1 is a filtered subset.
VCV_S1 <- impute_covariance_matrix(vi = dat1$lnRRV_S, cluster = dat1$Study_ID, r = 0.5)

# Meta-regression: type of learning/memory assay (cell-means coding)
mod_S1 <- rma.mv(yi = lnRR_Sa, V = VCV_S1, mod = ~ Type_learning - 1,
                 random = list(~1 | Study_ID,
                               ~1 | ES_ID,
                               ~1 | Strain),
                 test = "t",
                 data = dat1)

summary(mod_S1)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -17.3159   34.6317   46.6317   61.5635   47.6561   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0203  0.1423     30     no  Study_ID 
## sigma^2.2  0.0351  0.1875     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 89) = 732.1642, p-val < .0001
## 
## Test of Moderators (coefficients 1:3):
## F(df1 = 3, df2 = 89) = 7.3668, p-val = 0.0002
## 
## Model Results:
## 
##                            estimate      se     tval  df    pval    ci.lb 
## Type_learningConditioning   -0.1052  0.0425  -2.4791  89  0.0151  -0.1896 
## Type_learningHabituation    -0.5273  0.1191  -4.4285  89  <.0001  -0.7639 
## Type_learningRecognition    -0.0490  0.0718  -0.6819  89  0.4971  -0.1917 
##                              ci.ub 
## Type_learningConditioning  -0.0209    * 
## Type_learningHabituation   -0.2907  *** 
## Type_learningRecognition    0.0937      
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_S1) 
##   R2_marginal R2_coditional 
##     0.1974766     1.0000000
# Orchard plot of mod_S1 (type of learning, stress effect sizes), with PI/CI
# error bars and cosmetic theme tweaks
Learning_S <-orchard_plot(mod_S1, mod = "Type_learning", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Learning vs Memory

Is the assay broadly measuring learning or memory?

# Meta-regression: learning vs memory assays (stress effect sizes),
# using the overall stress VCV matrix
mod_S2 <- rma.mv(yi = lnRR_Sa,
                 V = VCV_S,
                 mod = ~ Learning_vs_memory - 1,
                 random = list(~1 | Study_ID, ~1 | ES_ID, ~1 | Strain),
                 test = "t",
                 data = dat)

summary(mod_S2)
## 
## Multivariate Meta-Analysis Model (k = 85; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -11.8032   23.6065   33.6065   45.7007   34.3857   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0261  0.1615     30     no  Study_ID 
## sigma^2.2  0.0267  0.1635     85     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 83) = 676.7747, p-val < .0001
## 
## Test of Moderators (coefficients 1:2):
## F(df1 = 2, df2 = 83) = 2.9221, p-val = 0.0594
## 
## Model Results:
## 
##                             estimate      se     tval  df    pval    ci.lb 
## Learning_vs_memoryLearning   -0.0573  0.0532  -1.0785  83  0.2840  -0.1631 
## Learning_vs_memoryMemory     -0.1053  0.0436  -2.4135  83  0.0180  -0.1920 
##                               ci.ub 
## Learning_vs_memoryLearning   0.0484    
## Learning_vs_memoryMemory    -0.0185  * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
r2_ml(mod_S2) 
##   R2_marginal R2_coditional 
##   0.009625517   1.000000000
# Orchard plot of mod_S2 (learning vs memory, stress effect sizes), with
# PI/CI error bars and cosmetic theme tweaks
LvsM_S <- orchard_plot(mod_S2, mod = "Learning_vs_memory", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
       text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Type of reinforcement

The type of cue used

# Sampling (co)variance matrix for the reinforcement-type subset (stress
# effect sizes). Two fixes: (1) the original chained assignment
# (VCV_S2 <- VCV_E <- ...) silently overwrote VCV_E, the enrichment VCV matrix
# used by other models; (2) cluster must come from dat2, the same data as vi.
VCV_S2 <- impute_covariance_matrix(vi = dat2$lnRRV_S, cluster = dat2$Study_ID, r = 0.5)

# Meta-regression: type of reinforcement (cell-means coding)
mod_S3 <- rma.mv(yi = lnRR_Sa, V = VCV_S2, mod = ~ Appetitive_vs_aversive - 1,
                 random = list(~1 | Study_ID,
                               ~1 | ES_ID,
                               ~1 | Strain),
                 test = "t",
                 data = dat2)

summary(mod_S3)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -22.1655   44.3311   56.3311   71.2629   57.3555   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0126  0.1124     30     no  Study_ID 
## sigma^2.2  0.0506  0.2248     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 89) = 911.1858, p-val < .0001
## 
## Test of Moderators (coefficients 1:3):
## F(df1 = 3, df2 = 89) = 3.6161, p-val = 0.0162
## 
## Model Results:
## 
##                                       estimate      se     tval  df    pval 
## Appetitive_vs_aversiveAppetitive       -0.1980  0.0836  -2.3685  89  0.0200 
## Appetitive_vs_aversiveAversive         -0.0747  0.0489  -1.5264  89  0.1305 
## Appetitive_vs_aversiveNot applicable   -0.1386  0.0660  -2.0995  89  0.0386 
##                                         ci.lb    ci.ub 
## Appetitive_vs_aversiveAppetitive      -0.3640  -0.0319  * 
## Appetitive_vs_aversiveAversive        -0.1719   0.0225    
## Appetitive_vs_aversiveNot applicable  -0.2698  -0.0074  * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Marginal and conditional R^2 for the reinforcement-type model
r2_ml(mod_S3) 
##   R2_marginal R2_coditional 
##    0.03676953    1.00000000
# Orchard plot of lnRR by reinforcement type (appetitive / aversive / NA),
# with prediction and confidence intervals overlaid.
Reinforcement_S <-orchard_plot(mod_S3, mod = "Appetitive_vs_aversive", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
       text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Type of stress

The type of stress manipulation

# Subset to the stressor types with adequate replication
dat3 <- filter(dat, Type_stress_exposure %in% c("Restraint", "Noise", "MS", "Combination"))
# FIX: `cluster` must come from the same filtered data as `vi` (the original
# used dat$Study_ID, which no longer aligns with dat3's vi vector).
VCV_S3 <- impute_covariance_matrix(vi = dat3$lnRRV_S, cluster = dat3$Study_ID, r = 0.5)

# Meta-regression on type of stress manipulation (no intercept: per-level means)
mod_S4 <- rma.mv(yi = lnRR_Sa, V = VCV_S3, mod = ~ Type_stress_exposure - 1,
                 random = list(~1 | Study_ID,
                               ~1 | ES_ID,
                               ~1 | Strain),
                 test = "t",
                 data = dat3)
summary(mod_S4)
## 
## Multivariate Meta-Analysis Model (k = 85; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -21.3621   42.7241   56.7241   73.4853   58.2584   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0236  0.1537     25     no  Study_ID 
## sigma^2.2  0.0527  0.2295     85     no     ES_ID 
## sigma^2.3  0.0000  0.0000      4     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 81) = 1030.5305, p-val < .0001
## 
## Test of Moderators (coefficients 1:4):
## F(df1 = 4, df2 = 81) = 2.0257, p-val = 0.0985
## 
## Model Results:
## 
##                                  estimate      se     tval  df    pval    ci.lb 
## Type_stress_exposureCombination   -0.0558  0.1132  -0.4927  81  0.6236  -0.2811 
## Type_stress_exposureMS            -0.0607  0.0691  -0.8780  81  0.3825  -0.1982 
## Type_stress_exposureNoise         -0.1468  0.1248  -1.1765  81  0.2428  -0.3950 
## Type_stress_exposureRestraint     -0.2175  0.0878  -2.4764  81  0.0154  -0.3922 
##                                    ci.ub 
## Type_stress_exposureCombination   0.1695    
## Type_stress_exposureMS            0.0768    
## Type_stress_exposureNoise         0.1015    
## Type_stress_exposureRestraint    -0.0427  * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Marginal and conditional R^2 for the stressor-type model
r2_ml(mod_S4)
##   R2_marginal R2_coditional 
##    0.05175205    1.00000000
# Orchard plot of lnRR by type of stress exposure, with prediction and
# confidence intervals overlaid.
Stressor<- orchard_plot(mod_S4, mod = "Type_stress_exposure", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Age of stress

# Covariance matrix over the full data set (sampling correlation r = 0.5
# within study); here vi and cluster both come from `dat`, so they align.
VCV_S3a <- impute_covariance_matrix(vi = dat$lnRRV_S, cluster = dat$Study_ID, r = 0.5)

# Meta-regression on age at stress exposure (no intercept: per-level means)
mod_S5 <- rma.mv(
  yi = lnRR_Sa,
  V = VCV_S3a,
  mod = ~ Age_stress_exposure - 1,
  random = list(~1 | Study_ID, ~1 | ES_ID, ~1 | Strain),
  test = "t",
  data = dat
)
summary(mod_S5)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -20.8850   41.7699   55.7699   73.1113   57.1699   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0021  0.0455     30     no  Study_ID 
## sigma^2.2  0.0531  0.2304     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 88) = 830.4810, p-val < .0001
## 
## Test of Moderators (coefficients 1:4):
## F(df1 = 4, df2 = 88) = 4.4779, p-val = 0.0024
## 
## Model Results:
## 
##                                     estimate      se     tval  df    pval 
## Age_stress_exposureAdolescent         0.0244  0.1336   0.1825  88  0.8556 
## Age_stress_exposureAdult             -0.2481  0.0693  -3.5790  88  0.0006 
## Age_stress_exposureEarly postnatal   -0.0645  0.0461  -1.3997  88  0.1651 
## Age_stress_exposurePrenatal          -0.1379  0.0782  -1.7634  88  0.0813 
##                                       ci.lb    ci.ub 
## Age_stress_exposureAdolescent       -0.2411   0.2899      
## Age_stress_exposureAdult            -0.3859  -0.1104  *** 
## Age_stress_exposureEarly postnatal  -0.1561   0.0271      
## Age_stress_exposurePrenatal         -0.2932   0.0175    . 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Marginal and conditional R^2 for the age-at-exposure model
r2_ml(mod_S5) 
##   R2_marginal R2_coditional 
##     0.0971388     1.0000000
# Orchard plot of lnRR by age at stress exposure, with prediction and
# confidence intervals overlaid.
Age_S <- orchard_plot(mod_S5, mod = "Age_stress_exposure", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Stress duration

How long was the stress applied for (chronic = every day for 7 days or more)? This has the highest marginal R2

# Subset to the stress-duration levels with adequate replication
dat4 <- filter(dat, Stress_duration %in% c("Chronic", "Acute"))
# FIX: `cluster` must come from the same filtered data as `vi` (the original
# used dat$Study_ID, which no longer aligns with dat4's vi vector).
VCV_S4 <- impute_covariance_matrix(vi = dat4$lnRRV_S, cluster = dat4$Study_ID, r = 0.5)

# Meta-regression on stress duration (no intercept: per-level means)
mod_S6 <- rma.mv(yi = lnRR_Sa, V = VCV_S4, mod = ~ Stress_duration - 1,
                 random = list(~1 | Study_ID,
                               ~1 | ES_ID,
                               ~1 | Strain),
                 test = "t",
                 data = dat4)
summary(mod_S6)
## 
## Multivariate Meta-Analysis Model (k = 89; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -23.6341   47.2682   57.2682   69.5978   58.0090   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0173  0.1314     29     no  Study_ID 
## sigma^2.2  0.0514  0.2267     89     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 87) = 1164.1990, p-val < .0001
## 
## Test of Moderators (coefficients 1:2):
## F(df1 = 2, df2 = 87) = 4.9979, p-val = 0.0088
## 
## Model Results:
## 
##                         estimate      se     tval  df    pval    ci.lb    ci.ub 
## Stress_durationAcute      0.0025  0.0866   0.0292  87  0.9768  -0.1695   0.1746 
## Stress_durationChronic   -0.1509  0.0477  -3.1593  87  0.0022  -0.2458  -0.0559 
##  
## Stress_durationAcute 
## Stress_durationChronic  ** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Marginal and conditional R^2 for the stress-duration model
r2_ml(mod_S6) 
##   R2_marginal R2_coditional 
##    0.05879435    1.00000000
# Orchard plot of lnRR by stress duration (acute vs chronic), with prediction
# and confidence intervals overlaid.
Duration_S <- orchard_plot(mod_S6, mod = "Stress_duration", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

Full model

# selecting moderator levels with k >= 5
dat_Sfm <- dat %>%
  filter(Type_learning %in% c("Recognition", "Habituation", "Conditioning"),
         Appetitive_vs_aversive %in% c("Appetitive", "Aversive", "Not applicable"),
         Type_stress_exposure %in% c("Restraint", "Noise", "MS", "Combination"),
         Stress_duration %in% c("Chronic", "Acute"))

# FIX: (1) `vi` used the enrichment variances (lnRRV_E) although the model
# below is fitted to the stress effect sizes (lnRR_Sa) -- use lnRRV_S, as in
# every other stress model; (2) `cluster` must come from the filtered data
# (the original dat$Study_ID no longer aligns with dat_Sfm's vi vector).
VCV_Sfm <- impute_covariance_matrix(vi = dat_Sfm$lnRRV_S, cluster = dat_Sfm$Study_ID, r = 0.5)

# Full (multi-moderator) model; Type_learning enters without an intercept
mod_Sfm <- rma.mv(yi = lnRR_Sa, V = VCV_Sfm,
                  mod = ~ Type_learning - 1 + Learning_vs_memory +
                    Appetitive_vs_aversive + Type_stress_exposure +
                    Age_stress_exposure + Stress_duration,
                  random = list(~1 | Study_ID,
                                ~1 | ES_ID,
                                ~1 | Strain),
                  test = "t",
                  data = dat_Sfm)
summary(mod_Sfm)
## 
## Multivariate Meta-Analysis Model (k = 75; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
##  -5.4300   10.8600   40.8600   73.0070   51.0728   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0124  0.1113     24     no  Study_ID 
## sigma^2.2  0.0321  0.1791     75     no     ES_ID 
## sigma^2.3  0.0000  0.0000      4     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 63) = 440.0658, p-val < .0001
## 
## Test of Moderators (coefficients 1:12):
## F(df1 = 12, df2 = 63) = 2.5278, p-val = 0.0087
## 
## Model Results:
## 
##                                       estimate      se     tval  df    pval 
## Type_learningConditioning               0.3879  0.2240   1.7317  63  0.0882 
## Type_learningRecognition                0.3570  0.2965   1.2041  63  0.2330 
## Learning_vs_memoryMemory               -0.0996  0.0619  -1.6094  63  0.1125 
## Appetitive_vs_aversiveAversive          0.0704  0.0964   0.7302  63  0.4679 
## Appetitive_vs_aversiveNot applicable    0.1932  0.2215   0.8720  63  0.3865 
## Type_stress_exposureMS                  0.2887  0.2407   1.1997  63  0.2347 
## Type_stress_exposureNoise               0.0917  0.2881   0.3185  63  0.7512 
## Type_stress_exposureRestraint          -0.0435  0.1817  -0.2391  63  0.8118 
## Age_stress_exposureAdult               -0.1459  0.1670  -0.8733  63  0.3858 
## Age_stress_exposureEarly postnatal     -0.2644  0.2918  -0.9063  63  0.3683 
## Age_stress_exposurePrenatal            -0.2459  0.2078  -1.1834  63  0.2411 
## Stress_durationChronic                 -0.4626  0.1442  -3.2076  63  0.0021 
##                                         ci.lb    ci.ub 
## Type_learningConditioning             -0.0597   0.8354   . 
## Type_learningRecognition              -0.2355   0.9494     
## Learning_vs_memoryMemory              -0.2234   0.0241     
## Appetitive_vs_aversiveAversive        -0.1222   0.2629     
## Appetitive_vs_aversiveNot applicable  -0.2495   0.6358     
## Type_stress_exposureMS                -0.1922   0.7697     
## Type_stress_exposureNoise             -0.4839   0.6674     
## Type_stress_exposureRestraint         -0.4066   0.3197     
## Age_stress_exposureAdult              -0.4796   0.1879     
## Age_stress_exposureEarly postnatal    -0.8474   0.3186     
## Age_stress_exposurePrenatal           -0.6611   0.1693     
## Stress_durationChronic                -0.7509  -0.1744  ** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Marginal and conditional R^2 for the full model
r2_ml(mod_Sfm) 
##   R2_marginal R2_coditional 
##     0.3908065     1.0000000
# Automated model selection over all moderator subsets; relies on the
# eval(metafor:::.MuMIn) hook loaded at the top of the file so that MuMIn's
# dredge() works with rma.mv objects
res_Sfm <- dredge(mod_Sfm, trace=2)
## 
  |                                                                            
  |                                                                      |   0%
  |                                                                            
  |=                                                                     |   2%
  |                                                                            
  |==                                                                    |   3%
  |                                                                            
  |===                                                                   |   5%
  |                                                                            
  |====                                                                  |   6%
  |                                                                            
  |=====                                                                 |   8%
  |                                                                            
  |=======                                                               |   9%
  |                                                                            
  |========                                                              |  11%
  |                                                                            
  |=========                                                             |  12%
  |                                                                            
  |==========                                                            |  14%
  |                                                                            
  |===========                                                           |  16%
  |                                                                            
  |============                                                          |  17%
  |                                                                            
  |=============                                                         |  19%
  |                                                                            
  |==============                                                        |  20%
  |                                                                            
  |===============                                                       |  22%
  |                                                                            
  |================                                                      |  23%
  |                                                                            
  |==================                                                    |  25%
  |                                                                            
  |===================                                                   |  27%
  |                                                                            
  |====================                                                  |  28%
  |                                                                            
  |=====================                                                 |  30%
  |                                                                            
  |======================                                                |  31%
  |                                                                            
  |=======================                                               |  33%
  |                                                                            
  |========================                                              |  34%
  |                                                                            
  |=========================                                             |  36%
  |                                                                            
  |==========================                                            |  38%
  |                                                                            
  |===========================                                           |  39%
  |                                                                            
  |============================                                          |  41%
  |                                                                            
  |==============================                                        |  42%
  |                                                                            
  |===============================                                       |  44%
  |                                                                            
  |================================                                      |  45%
  |                                                                            
  |=================================                                     |  47%
  |                                                                            
  |==================================                                    |  48%
  |                                                                            
  |===================================                                   |  50%
  |                                                                            
  |====================================                                  |  52%
  |                                                                            
  |=====================================                                 |  53%
  |                                                                            
  |======================================                                |  55%
  |                                                                            
  |=======================================                               |  56%
  |                                                                            
  |========================================                              |  58%
  |                                                                            
  |==========================================                            |  59%
  |                                                                            
  |===========================================                           |  61%
  |                                                                            
  |============================================                          |  62%
  |                                                                            
  |=============================================                         |  64%
  |                                                                            
  |==============================================                        |  66%
  |                                                                            
  |===============================================                       |  67%
  |                                                                            
  |================================================                      |  69%
  |                                                                            
  |=================================================                     |  70%
  |                                                                            
  |==================================================                    |  72%
  |                                                                            
  |===================================================                   |  73%
  |                                                                            
  |====================================================                  |  75%
  |                                                                            
  |======================================================                |  77%
  |                                                                            
  |=======================================================               |  78%
  |                                                                            
  |========================================================              |  80%
  |                                                                            
  |=========================================================             |  81%
  |                                                                            
  |==========================================================            |  83%
  |                                                                            
  |===========================================================           |  84%
  |                                                                            
  |============================================================          |  86%
  |                                                                            
  |=============================================================         |  88%
  |                                                                            
  |==============================================================        |  89%
  |                                                                            
  |===============================================================       |  91%
  |                                                                            
  |=================================================================     |  92%
  |                                                                            
  |==================================================================    |  94%
  |                                                                            
  |===================================================================   |  95%
  |                                                                            
  |====================================================================  |  97%
  |                                                                            
  |===================================================================== |  98%
# Retain candidate models within 6 AICc units of the best model
# NOTE(review): the original comment said "delta <= 2" while the subset uses
# delta <= 6 -- confirm which threshold was intended.
res_Sfm2 <- subset(res_Sfm, delta <= 6, recalc.weights = FALSE)
# Sum of Akaike weights per moderator; sw() replaces the deprecated
# MuMIn::importance() and returns the same sum-of-weights table.
sw(res_Sfm2)
##                      Learning_vs_memory Stress_duration Type_learning
## Sum of weights:      0.94               0.94            0.20         
## N containing models:    8                  8               4         
##                      Age_stress_exposure Type_stress_exposure
## Sum of weights:      0.16                0.14                
## N containing models:    2                   2                
##                      Appetitive_vs_aversive
## Sum of weights:      0.14                  
## N containing models:    2

Publication bias & sensitivity analysis

# Funnel plot of the full model for visual publication-bias inspection
funnel(mod_Sfm, xlab = "lnRR", ylab = "Standard Error")

# calculating the square root of the inverse "effective" sample size for use
# as a precision predictor in the full meta-regression (Egger-style test)
dat_Sfm$sqrt_inv_e_n <- with(dat_Sfm, sqrt(1/CC_n + 1/EC_n + 1/ES_n + 1/CS_n))

# time-lag bias: mean-centre publication year
# FIX: spell out FALSE rather than the reassignable shorthand F
dat_Sfm$c_Year_published <- as.vector(scale(dat_Sfm$Year_published, scale = FALSE))

# Multilevel meta-regression testing small-study (sqrt_inv_e_n) and time-lag
# (c_Year_published) effects, adjusting for the main moderators; Nelder-Mead
# optimizer used for convergence
PB_MR_S <- rma.mv(lnRR_Sa, lnRRV_S,
                  mods = ~ 1 + sqrt_inv_e_n + c_Year_published + Type_learning +
                    Appetitive_vs_aversive + Type_stress_exposure + Age_stress_exposure,
                  random = list(~1 | Study_ID,
                                ~1 | ES_ID,
                                ~1 | Strain),
                  method = "REML",
                  test = "t",
                  data = dat_Sfm,
                  control = list(optimizer = "optim", optmethod = "Nelder-Mead"))

estimates_PB_MR_S <- estimates.CI(PB_MR_S)
estimates_PB_MR_S
estimate mean lower upper
intrcpt -0.0534910 -0.9384247 0.8314427
sqrt_inv_e_n 0.1476637 -1.2155241 1.5108514
c_Year_published 0.0055548 -0.0243577 0.0354673
Type_learningHabituation -0.6361107 -1.0644764 -0.2077451
Type_learningRecognition -0.1068719 -0.4754180 0.2616742
Appetitive_vs_aversiveAversive 0.1220611 -0.1344910 0.3786133
Appetitive_vs_aversiveNot applicable 0.2373222 -0.1952699 0.6699142
Type_stress_exposureMS -0.0009972 -0.8170353 0.8150410
Type_stress_exposureNoise 0.1814221 -0.8280961 1.1909402
Type_stress_exposureRestraint -0.1312725 -0.5964008 0.3338558
Age_stress_exposureAdult -0.2037280 -0.6376774 0.2302214
Age_stress_exposureEarly postnatal -0.2129606 -1.0867878 0.6608667
Age_stress_exposurePrenatal -0.1999017 -0.7352586 0.3354553
#no effect of inv_effective sample size or year published

# leave-one-out analysis: refit the overall stress model once per study,
# each time omitting that study
dat$Study_ID <- as.factor(dat$Study_ID)

study_levels <- levels(dat$Study_ID)
# Preallocate rather than growing the list; seq_along() is safe for length 0
LeaveOneOut_effectsize <- vector("list", length(study_levels))
for (i in seq_along(study_levels)) {
  LeaveOneOut_effectsize[[i]] <- rma.mv(yi = lnRR_Sa, V = lnRRV_S,
                                        random = list(~1 | Study_ID, ~1 | ES_ID, ~1 | Strain),
                                        method = "REML",
                                        data = dat[dat$Study_ID != study_levels[i], ])
}


# Pull the pooled estimate and its 95% CI bounds out of a fitted rma model,
# returned as a one-row data frame (est / lower / upper).
est.func <- function(model) {
  data.frame(est = model$b, lower = model$ci.lb, upper = model$ci.ub)
}


# Combine the leave-one-out estimates into one data frame, labelled with the
# study omitted in each refit (lapply can take est.func directly; the
# anonymous-function wrapper was redundant)
MA_CVR <- lapply(LeaveOneOut_effectsize, est.func) %>%
  bind_rows() %>%
  mutate(left_out = levels(dat$Study_ID))

# Freeze the factor level order so ggplot plots studies in data order
# (the intermediate as.factor() call was redundant and has been dropped)
MA_CVR$left_out <- factor(MA_CVR$left_out, levels = MA_CVR$left_out)

# plotting: each leave-one-out estimate (point + CI) against the full-model
# estimate (solid line) and its 95% CI (dotted lines)
leaveoneout_S <- ggplot(MA_CVR) +
  geom_hline(yintercept = 0, lty = 2, lwd = 1) +
  geom_hline(yintercept = mod_S0$ci.lb, lty = 3, lwd = 0.75, colour = "black") +
  geom_hline(yintercept = mod_S0$b, lty = 1, lwd = 0.75, colour = "black") +
  geom_hline(yintercept = mod_S0$ci.ub, lty = 3, lwd = 0.75, colour = "black") +
  geom_pointrange(aes(x = left_out, y = est, ymin = lower, ymax = upper)) +
  xlab("Study left out") +
  ylab("lnRR, 95% CI") +
  coord_flip() +
  # FIX: apply the complete theme first -- in the original, theme_bw() came
  # after theme(panel.grid.minor = ...) and silently reset it; the scattered
  # theme() calls are also merged into one
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.text.y = element_text(size = 6))

leaveoneout_S

# NOTE(review): as.integer() on a factor yields the level codes (1..n), which
# equal the original IDs only if they were consecutive integers -- confirm.
dat$Study_ID <- as.integer(dat$Study_ID)

Interaction of stress and EE

Intercept

Enriched and stressed animals are better at learning and memory. Note: the interaction is slightly non-significant if Wang et al. (the study excluded because its negative values had to be shifted upward) is included.

# Intercept-only model for the enrichment-by-stress interaction effect sizes,
# with random intercepts for study, individual effect size, and strain.
mod_ES0 <- rma.mv(
  yi = lnRR_ESa,
  V = VCV_ES,
  random = list(~1 | Study_ID, ~1 | ES_ID, ~1 | Strain),
  test = "t",
  data = dat
)

summary(mod_ES0)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -50.9607  101.9214  109.9214  119.9648  110.3865   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0279  0.1669     30     no  Study_ID 
## sigma^2.2  0.0311  0.1765     92     no     ES_ID 
## sigma^2.3  0.0048  0.0690      6     no    Strain 
## 
## Test for Heterogeneity:
## Q(df = 91) = 307.1213, p-val < .0001
## 
## Model Results:
## 
## estimate      se    tval  df    pval   ci.lb   ci.ub 
##   0.1578  0.0678  2.3273  91  0.0222  0.0231  0.2925  * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Heterogeneity (I^2) decomposed across the three random-effect levels
i2_ml(mod_ES0) 
##    I2_total I2_Study_ID    I2_ES_ID   I2_Strain 
##  0.82109711  0.35875892  0.40100786  0.06133033
# Orchard plot of the overall interaction effect ("Int" = intercept), with
# prediction and confidence intervals overlaid.
orchard_plot(mod_ES0, mod = "Int", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 24), # change font sizes
        legend.title = element_text(size = 15),
        legend.text = element_text(size = 13)) 

#running the model again with the extreme outlier removed
dat_outliers <- dat %>%
  filter(ES_ID != 88)

# Rebuild the sampling (co)variance matrix on the FILTERED data.
# FIX: vi previously pointed at the misspelled column lnRRV_E (every other
# chunk uses lnRRV_ES), and cluster used the unfiltered dat$Study_ID, whose
# length and row order no longer match vi once ES_ID 88 is dropped.
VCV_ES_outliers <- impute_covariance_matrix(vi = dat_outliers$lnRRV_ES,
                                            cluster = dat_outliers$Study_ID,
                                            r = 0.5)

mod_ESoutlier <- rma.mv(yi = lnRR_ESa, V = VCV_ES_outliers, random = list(~1|Study_ID,
                                                             ~1|ES_ID,
                                                             ~1|Strain),
                  test = "t", 
                  data = dat_outliers)

# FIX: previously re-printed summary(mod_ES0); the sensitivity model fitted
# above is mod_ESoutlier.
summary(mod_ESoutlier)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -50.9607  101.9214  109.9214  119.9648  110.3865   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0279  0.1669     30     no  Study_ID 
## sigma^2.2  0.0311  0.1765     92     no     ES_ID 
## sigma^2.3  0.0048  0.0690      6     no    Strain 
## 
## Test for Heterogeneity:
## Q(df = 91) = 307.1213, p-val < .0001
## 
## Model Results:
## 
## estimate      se    tval  df    pval   ci.lb   ci.ub 
##   0.1578  0.0678  2.3273  91  0.0222  0.0231  0.2925  * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Orchard plot of the outlier-removed sensitivity model (mod_ESoutlier),
# same layout as the overall-model plot above.
orchard_plot(mod_ESoutlier, mod = "Int", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 24), # change font sizes
        legend.title = element_text(size = 15),
        legend.text = element_text(size = 13))   

Meta-regression

Type of learning

The type of learning/memory response

# Sampling VCV for the learning-type subset (dat1), assuming r = 0.5
# correlation among effect sizes from the same study.
# FIX: cluster previously used the full dat$Study_ID while vi came from dat1;
# vi and cluster must come from the same data frame so their lengths and row
# order line up.
VCV_ES1 <- impute_covariance_matrix(vi = dat1$lnRRV_ES, cluster = dat1$Study_ID, r = 0.5)

# Meta-regression on type of learning/memory assay (no intercept, so each
# level is estimated directly).
mod_ES1 <- rma.mv(yi = lnRR_ESa, V = VCV_ES1, mod = ~Type_learning-1, random = list(~1|Study_ID,
                                                                                    ~1|ES_ID,
                                                                                    ~1|Strain),
                  test = "t",
                  data = dat1)

summary(mod_ES1)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -48.9339   97.8678  109.8678  124.7996  110.8921   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0344  0.1854     30     no  Study_ID 
## sigma^2.2  0.0246  0.1569     92     no     ES_ID 
## sigma^2.3  0.0040  0.0633      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 89) = 293.7418, p-val < .0001
## 
## Test of Moderators (coefficients 1:3):
## F(df1 = 3, df2 = 89) = 3.5124, p-val = 0.0184
## 
## Model Results:
## 
##                            estimate      se    tval  df    pval    ci.lb 
## Type_learningConditioning    0.1900  0.0696  2.7284  89  0.0077   0.0516 
## Type_learningHabituation     0.3107  0.1558  1.9946  89  0.0491   0.0012 
## Type_learningRecognition     0.0256  0.0896  0.2852  89  0.7761  -0.1525 
##                             ci.ub 
## Type_learningConditioning  0.3284  ** 
## Type_learningHabituation   0.6202   * 
## Type_learningRecognition   0.2036     
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Variance explained by the learning-type moderator alone (marginal) and by
# moderator plus random effects (conditional)
r2_ml(mod_ES1) 
##   R2_marginal R2_coditional 
##    0.07392779    0.94099939
# Orchard plot by learning type, stored for later panel assembly
Learning_ES <- orchard_plot(mod_ES1, mod = "Type_learning", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Learning vs Memory

Is the assay broadly measuring learning or memory?

# Meta-regression: is the assay broadly measuring learning or memory?
# Means model (no intercept) so each level gets its own estimate.
# NOTE(review): the printed summary reports k = 85 although dat/VCV_ES cover
# 92 effect sizes — rows with missing Learning_vs_memory appear to be dropped
# by rma.mv; confirm this is intended rather than building a subset VCV.
mod_ES2 <- rma.mv(
  yi     = lnRR_ESa,
  V      = VCV_ES,
  mod    = ~ Learning_vs_memory - 1,
  random = list(~ 1 | Study_ID, ~ 1 | ES_ID, ~ 1 | Strain),
  test   = "t",
  data   = dat
)

summary(mod_ES2)
## 
## Multivariate Meta-Analysis Model (k = 85; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -49.8593   99.7187  109.7187  121.8129  110.4979   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0282  0.1678     30     no  Study_ID 
## sigma^2.2  0.0338  0.1837     85     no     ES_ID 
## sigma^2.3  0.0052  0.0721      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 83) = 302.1628, p-val < .0001
## 
## Test of Moderators (coefficients 1:2):
## F(df1 = 2, df2 = 83) = 3.0108, p-val = 0.0547
## 
## Model Results:
## 
##                             estimate      se    tval  df    pval    ci.lb 
## Learning_vs_memoryLearning    0.2044  0.0845  2.4175  83  0.0178   0.0362 
## Learning_vs_memoryMemory      0.1379  0.0719  1.9174  83  0.0586  -0.0051 
##                              ci.ub 
## Learning_vs_memoryLearning  0.3725  * 
## Learning_vs_memoryMemory    0.2810  . 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Marginal / conditional R2 for the learning-vs-memory moderator
r2_ml(mod_ES2) 
##   R2_marginal R2_coditional 
##    0.01448953    0.92362134
# Orchard plot, learning vs memory, stored for panel assembly
LvsM_ES <- orchard_plot(mod_ES2, mod = "Learning_vs_memory", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Type of reinforcement

The type of conditioning used

# Sampling VCV for the reinforcement-type subset (dat2); effect sizes from
# the same study assumed correlated at r = 0.5.
VCV_ES2 <- impute_covariance_matrix(
  vi      = dat2$lnRRV_ES,
  cluster = dat2$Study_ID,
  r       = 0.5
)

# Meta-regression: appetitive vs aversive reinforcement (means model,
# no intercept).
mod_ES3 <- rma.mv(
  yi     = lnRR_ESa,
  V      = VCV_ES2,
  mod    = ~ Appetitive_vs_aversive - 1,
  random = list(~ 1 | Study_ID, ~ 1 | ES_ID, ~ 1 | Strain),
  test   = "t",
  data   = dat2
)

summary(mod_ES3)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -49.6471   99.2943  111.2943  126.2261  112.3186   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0335  0.1831     30     no  Study_ID 
## sigma^2.2  0.0275  0.1658     92     no     ES_ID 
## sigma^2.3  0.0014  0.0372      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 89) = 288.3178, p-val < .0001
## 
## Test of Moderators (coefficients 1:3):
## F(df1 = 3, df2 = 89) = 3.2144, p-val = 0.0267
## 
## Model Results:
## 
##                                       estimate      se    tval  df    pval 
## Appetitive_vs_aversiveAppetitive        0.1482  0.1156  1.2825  89  0.2030 
## Appetitive_vs_aversiveAversive          0.1909  0.0667  2.8635  89  0.0052 
## Appetitive_vs_aversiveNot applicable    0.0578  0.0798  0.7247  89  0.4705 
##                                         ci.lb   ci.ub 
## Appetitive_vs_aversiveAppetitive      -0.0814  0.3779     
## Appetitive_vs_aversiveAversive         0.0584  0.3234  ** 
## Appetitive_vs_aversiveNot applicable  -0.1007  0.2163     
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Marginal / conditional R2 for the reinforcement-type moderator
r2_ml(mod_ES3) 
##   R2_marginal R2_coditional 
##    0.04711742    0.97883321
# Orchard plot by reinforcement type, stored for panel assembly
Reinforcement_ES <- orchard_plot(mod_ES3, mod = "Appetitive_vs_aversive", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Type of stress

The type of stress manipulation

# Sampling VCV for the stress-type subset (dat3).
# FIX: cluster previously used the full dat$Study_ID while vi came from dat3;
# both must be taken from the same (subset) data frame so their lengths and
# row order match.
VCV_ES3 <- impute_covariance_matrix(vi = dat3$lnRRV_ES, cluster = dat3$Study_ID, r = 0.5)
# Meta-regression on the type of stress manipulation (no intercept).
mod_ES4 <- rma.mv(yi = lnRR_ESa, V = VCV_ES3, mod = ~Type_stress_exposure-1, random = list(~1|Study_ID,
                                                                                            ~1|ES_ID,
                                                                                            ~1|Strain),
                  test = "t",
                  data = dat3)
summary(mod_ES4)
## 
## Multivariate Meta-Analysis Model (k = 85; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -45.8298   91.6596  105.6596  122.4207  107.1939   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0559  0.2365     25     no  Study_ID 
## sigma^2.2  0.0320  0.1788     85     no     ES_ID 
## sigma^2.3  0.0221  0.1487      4     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 81) = 297.6210, p-val < .0001
## 
## Test of Moderators (coefficients 1:4):
## F(df1 = 4, df2 = 81) = 0.4917, p-val = 0.7418
## 
## Model Results:
## 
##                                  estimate      se    tval  df    pval    ci.lb 
## Type_stress_exposureCombination    0.1140  0.1773  0.6429  81  0.5221  -0.2387 
## Type_stress_exposureMS             0.1571  0.1392  1.1287  81  0.2624  -0.1198 
## Type_stress_exposureNoise          0.2202  0.2133  1.0321  81  0.3051  -0.2043 
## Type_stress_exposureRestraint      0.1788  0.1554  1.1508  81  0.2532  -0.1303 
##                                   ci.ub 
## Type_stress_exposureCombination  0.4667    
## Type_stress_exposureMS           0.4340    
## Type_stress_exposureNoise        0.6447    
## Type_stress_exposureRestraint    0.4880    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Marginal / conditional R2 for the stress-type moderator
r2_ml(mod_ES4)
##   R2_marginal R2_coditional 
##   0.008276802   0.800570495
# Orchard plot by stressor type, stored for panel assembly
Stressor_ES <- orchard_plot(mod_ES4, mod = "Type_stress_exposure", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7))  

Age of stress

The age at which the individuals were exposed to the stressor.

# Meta-regression: age at which animals were exposed to the stressor
# (full data set, so the full VCV_ES applies; means model, no intercept).
mod_ES5 <- rma.mv(
  yi     = lnRR_ESa,
  V      = VCV_ES,
  mod    = ~ Age_stress_exposure - 1,
  random = list(~ 1 | Study_ID, ~ 1 | ES_ID, ~ 1 | Strain),
  test   = "t",
  data   = dat
)
summary(mod_ES5)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -48.8724   97.7449  111.7449  129.0862  113.1449   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0221  0.1486     30     no  Study_ID 
## sigma^2.2  0.0315  0.1774     92     no     ES_ID 
## sigma^2.3  0.0186  0.1365      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 88) = 279.7710, p-val < .0001
## 
## Test of Moderators (coefficients 1:4):
## F(df1 = 4, df2 = 88) = 1.7577, p-val = 0.1446
## 
## Model Results:
## 
##                                     estimate      se    tval  df    pval 
## Age_stress_exposureAdolescent         0.0288  0.2073  0.1388  88  0.8899 
## Age_stress_exposureAdult              0.2096  0.1331  1.5750  88  0.1188 
## Age_stress_exposureEarly postnatal    0.1402  0.1098  1.2770  88  0.2050 
## Age_stress_exposurePrenatal           0.3790  0.1455  2.6059  88  0.0108 
##                                       ci.lb   ci.ub 
## Age_stress_exposureAdolescent       -0.3832  0.4408    
## Age_stress_exposureAdult            -0.0549  0.4741    
## Age_stress_exposureEarly postnatal  -0.0780  0.3583    
## Age_stress_exposurePrenatal          0.0900  0.6681  * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Marginal / conditional R2 for the age-of-stress moderator
r2_ml(mod_ES5) 
##   R2_marginal R2_coditional 
##    0.09585233    0.76647791
# Orchard plot by age of stress exposure, stored for panel assembly
Age_stress_ES<-orchard_plot(mod_ES5, mod = "Age_stress_exposure", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
       axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Stress duration

How long was the stress applied for (chronic = every day for 7 days or more)? This has the highest marginal R2 (previously nearly 43%; note the output below reports 16%, so this figure may be out of date) — needs to be redone without the outlier.

# Sampling VCV for the stress-duration subset (dat4).
VCV_ES4 <- impute_covariance_matrix(
  vi      = dat4$lnRRV_ES,
  cluster = dat4$Study_ID,
  r       = 0.5
)

# Meta-regression: acute vs chronic stress (chronic = every day for 7 days
# or more); means model, no intercept.
mod_ES6 <- rma.mv(
  yi     = lnRR_ESa,
  V      = VCV_ES4,
  mod    = ~ Stress_duration - 1,
  random = list(~ 1 | Study_ID, ~ 1 | ES_ID, ~ 1 | Strain),
  test   = "t",
  data   = dat4
)
summary(mod_ES6)
## 
## Multivariate Meta-Analysis Model (k = 89; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -46.2033   92.4066  102.4066  114.7362  103.1474   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0136  0.1167     29     no  Study_ID 
## sigma^2.2  0.0351  0.1874     89     no     ES_ID 
## sigma^2.3  0.0104  0.1020      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 87) = 286.8908, p-val < .0001
## 
## Test of Moderators (coefficients 1:2):
## F(df1 = 2, df2 = 87) = 4.5460, p-val = 0.0132
## 
## Model Results:
## 
##                         estimate      se     tval  df    pval    ci.lb   ci.ub 
## Stress_durationAcute     -0.0266  0.1112  -0.2388  87  0.8118  -0.2476  0.1945 
## Stress_durationChronic    0.2220  0.0828   2.6815  87  0.0088   0.0575  0.3866 
##  
## Stress_durationAcute 
## Stress_durationChronic  ** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Marginal / conditional R2 for the stress-duration moderator
r2_ml(mod_ES6) 
##   R2_marginal R2_coditional 
##     0.1600392     0.8521674
# Orchard plot by stress duration, stored for panel assembly
Duration_ES<- orchard_plot(mod_ES6, mod = "Stress_duration", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Social enrichment

Does EE also include a manipulation of social environment (i.e., number of individuals in EE relative to control)?

# Sampling VCV for the social-enrichment subset (dat5).
VCV_ES5 <- impute_covariance_matrix(
  vi      = dat5$lnRRV_ES,
  cluster = dat5$Study_ID,
  r       = 0.5
)
# Meta-regression: enrichment with vs without a social-environment component
# (means model, no intercept).
mod_ES7 <- rma.mv(
  yi     = lnRR_ESa,
  V      = VCV_ES5,
  mod    = ~ EE_social - 1,
  random = list(~ 1 | Study_ID, ~ 1 | ES_ID, ~ 1 | Strain),
  test   = "t",
  data   = dat5
)

summary(mod_ES7)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -50.3334  100.6668  110.6668  123.1659  111.3811   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0285  0.1689     30     no  Study_ID 
## sigma^2.2  0.0312  0.1765     92     no     ES_ID 
## sigma^2.3  0.0073  0.0857      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 90) = 307.1089, p-val < .0001
## 
## Test of Moderators (coefficients 1:2):
## F(df1 = 2, df2 = 90) = 2.7595, p-val = 0.0687
## 
## Model Results:
## 
##                      estimate      se    tval  df    pval    ci.lb   ci.ub 
## EE_socialNon-social    0.1099  0.0910  1.2080  90  0.2302  -0.0708  0.2906    
## EE_socialSocial        0.2070  0.0892  2.3198  90  0.0226   0.0297  0.3843  * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Marginal / conditional R2 for the social-enrichment moderator
r2_ml(mod_ES7) 
##   R2_marginal R2_coditional 
##    0.03307114    0.89402837
# Orchard plot, social vs non-social enrichment, stored for panel assembly
Social_ES <- orchard_plot(mod_ES7, mod = "EE_social", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Exercise enrichment

Does the form of enrichment include exercise through a running wheel or treadmill?

# Meta-regression: does enrichment that includes exercise (running wheel or
# treadmill) differ from non-exercise enrichment? Full data, full VCV_ES;
# means model, no intercept.
mod_ES8 <- rma.mv(
  yi     = lnRR_ESa,
  V      = VCV_ES,
  mod    = ~ EE_exercise - 1,
  random = list(~ 1 | Study_ID, ~ 1 | ES_ID, ~ 1 | Strain),
  test   = "t",
  data   = dat
)

summary(mod_ES8)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -50.3523  100.7046  110.7046  123.2036  111.4189   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0244  0.1561     30     no  Study_ID 
## sigma^2.2  0.0316  0.1778     92     no     ES_ID 
## sigma^2.3  0.0113  0.1062      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 90) = 294.4340, p-val < .0001
## 
## Test of Moderators (coefficients 1:2):
## F(df1 = 2, df2 = 90) = 2.4502, p-val = 0.0920
## 
## Model Results:
## 
##                         estimate      se    tval  df    pval    ci.lb   ci.ub 
## EE_exerciseExercise       0.1298  0.0884  1.4687  90  0.1454  -0.0458  0.3053 
## EE_exerciseNo exercise    0.2313  0.1085  2.1307  90  0.0358   0.0156  0.4469 
##  
## EE_exerciseExercise 
## EE_exerciseNo exercise  * 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Marginal / conditional R2 for the exercise-enrichment moderator
r2_ml(mod_ES8) 
##   R2_marginal R2_coditional 
##    0.03394203    0.83804575
# Orchard plot, exercise vs no-exercise enrichment, stored for panel assembly
Exercise_ES <- orchard_plot(mod_ES8, mod = "EE_exercise", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
       axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Order to treatment exposure

In what order were animals exposed to stress and EE?

# Meta-regression: order in which animals were exposed to stress and
# enrichment (concurrently, enrichment first, or stress first).
# Means model, no intercept.
mod_ES9 <- rma.mv(
  yi     = lnRR_ESa,
  V      = VCV_ES,
  mod    = ~ Stress_vs_EE_timing - 1,
  random = list(~ 1 | Study_ID, ~ 1 | ES_ID, ~ 1 | Strain),
  test   = "t",
  data   = dat
)

summary(mod_ES9)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -48.5795   97.1590  109.1590  124.0908  110.1834   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0309  0.1758     30     no  Study_ID 
## sigma^2.2  0.0303  0.1741     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 89) = 295.5040, p-val < .0001
## 
## Test of Moderators (coefficients 1:3):
## F(df1 = 3, df2 = 89) = 4.1173, p-val = 0.0088
## 
## Model Results:
## 
##                                      estimate      se     tval  df    pval 
## Stress_vs_EE_timingConcurrently        0.2255  0.1360   1.6583  89  0.1008 
## Stress_vs_EE_timingEnrichment first   -0.2430  0.2047  -1.1871  89  0.2384 
## Stress_vs_EE_timingStress first        0.1597  0.0558   2.8623  89  0.0052 
##                                        ci.lb   ci.ub 
## Stress_vs_EE_timingConcurrently      -0.0447  0.4958     
## Stress_vs_EE_timingEnrichment first  -0.6496  0.1637     
## Stress_vs_EE_timingStress first       0.0488  0.2705  ** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Marginal / conditional R2 for the treatment-order moderator
r2_ml(mod_ES9)
##   R2_marginal R2_coditional 
##     0.1525456     1.0000000
# Orchard plot by order of treatment exposure, stored for panel assembly
Order_ES <- orchard_plot(mod_ES9, mod = "Stress_vs_EE_timing", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Age of enrichment

At what age were individuals exposed to EE?

# Sampling VCV for the enrichment-age subset (dat6).
VCV_ES6 <- impute_covariance_matrix(
  vi      = dat6$lnRRV_ES,
  cluster = dat6$Study_ID,
  r       = 0.5
)

# Meta-regression: age at which animals were exposed to enrichment
# (means model, no intercept).
mod_ES10 <- rma.mv(
  yi     = lnRR_ESa,
  V      = VCV_ES6,
  mod    = ~ Age_EE_exposure - 1,
  random = list(~ 1 | Study_ID, ~ 1 | ES_ID, ~ 1 | Strain),
  test   = "t",
  data   = dat6
)

summary(mod_ES10)
## 
## Multivariate Meta-Analysis Model (k = 88; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -45.0007   90.0014  100.0014  112.2731  100.7514   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0315  0.1776     29     no  Study_ID 
## sigma^2.2  0.0277  0.1663     88     no     ES_ID 
## sigma^2.3  0.0022  0.0467      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 86) = 291.6264, p-val < .0001
## 
## Test of Moderators (coefficients 1:2):
## F(df1 = 2, df2 = 86) = 3.5102, p-val = 0.0342
## 
## Model Results:
## 
##                            estimate      se    tval  df    pval    ci.lb 
## Age_EE_exposureAdolescent    0.1616  0.0666  2.4263  86  0.0173   0.0292 
## Age_EE_exposureAdult         0.1527  0.1041  1.4661  86  0.1463  -0.0543 
##                             ci.ub 
## Age_EE_exposureAdolescent  0.2941  * 
## Age_EE_exposureAdult       0.3597    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Marginal / conditional R2 for the age-of-enrichment moderator
r2_ml(mod_ES10) 
##   R2_marginal R2_coditional 
##  0.0002073979  0.9645265244
# Orchard plot by age of enrichment exposure, stored for panel assembly
Age_enrichment_ES <- orchard_plot(mod_ES10, mod = "Age_EE_exposure", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 3, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
       axis.text.x = element_text(size = 10), # change font sizes
        axis.text.y = element_text(size = 10),
        legend.title = element_text(size = 7),
        legend.text = element_text(size = 7)) 

Full model

# Subset to the moderator levels with sufficient data for the full
# (multi-moderator) model
dat_ESfm <- dat %>%
  filter(Type_learning %in% c("Recognition", "Habituation", "Conditioning"),
         Appetitive_vs_aversive %in% c("Appetitive", "Aversive", "Not applicable"),
         EE_social %in% c("Social", "Non-social"),
         Age_EE_exposure %in% c("Adult", "Adolescent"),
         Type_stress_exposure %in% c("Restraint", "Noise", "MS", "Combination"),
         Stress_duration %in% c("Chronic", "Acute"), 
         Age_stress_exposure %in% c("Prenatal", "Early postnatal", "Adult"))

# FIX: cluster must come from the FILTERED data (dat_ESfm), not dat, so it
# matches vi in length and row order.
VCV_ESfm <- impute_covariance_matrix(vi = dat_ESfm$lnRRV_ES, cluster = dat_ESfm$Study_ID, r = 0.5)

# Full model with all moderators.
# FIX: the response was lnRR_Sa while V was built from the lnRRV_ES variances;
# every other model in this section analyses lnRR_ESa, so use it here too
# (confirm against the intended effect size for this section).
# Note: a single "-1" removes the intercept for the whole formula; the
# original repeated "-1" after each term, which is redundant.
mod_ESfm <- rma.mv(yi = lnRR_ESa, V = VCV_ESfm,
                   mod = ~ Type_learning + Learning_vs_memory + Appetitive_vs_aversive +
                     EE_social + EE_exercise + Age_EE_exposure + Type_stress_exposure +
                     Age_stress_exposure + Stress_duration + Stress_vs_EE_timing - 1,
                   random = list(~1|Study_ID,
                                 ~1|ES_ID,
                                 ~1|Strain),
                   test = "t",
                   data = dat_ESfm)
summary(mod_ESfm)
## 
## Multivariate Meta-Analysis Model (k = 69; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
##  -1.1636    2.3271   38.3271   74.1289   57.8700   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0030  0.0548     21     no  Study_ID 
## sigma^2.2  0.0038  0.0617     69     no     ES_ID 
## sigma^2.3  0.0162  0.1273      2     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 54) = 80.1918, p-val = 0.0119
## 
## Test of Moderators (coefficients 1:15):
## F(df1 = 15, df2 = 54) = 3.9455, p-val < .0001
## 
## Model Results:
## 
##                                       estimate      se     tval  df    pval 
## Type_learningConditioning              -0.2202  0.2971  -0.7413  54  0.4617 
## Type_learningRecognition               -0.4017  0.4075  -0.9857  54  0.3287 
## Learning_vs_memoryMemory               -0.0592  0.0407  -1.4545  54  0.1516 
## Appetitive_vs_aversiveAversive          0.0549  0.0854   0.6435  54  0.5226 
## Appetitive_vs_aversiveNot applicable    0.1348  0.2627   0.5130  54  0.6100 
## EE_socialSocial                         0.2365  0.1097   2.1565  54  0.0355 
## EE_exerciseNo exercise                  0.0700  0.0872   0.8026  54  0.4257 
## Age_EE_exposureAdult                    0.4873  0.1676   2.9081  54  0.0053 
## Type_stress_exposureMS                  0.2575  0.1971   1.3062  54  0.1970 
## Type_stress_exposureNoise               0.1090  0.2525   0.4318  54  0.6676 
## Type_stress_exposureRestraint          -0.5625  0.1510  -3.7241  54  0.0005 
## Age_stress_exposureEarly postnatal     -0.0628  0.1693  -0.3710  54  0.7121 
## Stress_durationChronic                 -0.3883  0.1191  -3.2614  54  0.0019 
## Stress_vs_EE_timingEnrichment first     0.1473  0.2005   0.7345  54  0.4658 
## Stress_vs_EE_timingStress first         0.1270  0.1820   0.6978  54  0.4883 
##                                         ci.lb    ci.ub 
## Type_learningConditioning             -0.8158   0.3754      
## Type_learningRecognition              -1.2187   0.4154      
## Learning_vs_memoryMemory              -0.1407   0.0224      
## Appetitive_vs_aversiveAversive        -0.1162   0.2261      
## Appetitive_vs_aversiveNot applicable  -0.3919   0.6614      
## EE_socialSocial                        0.0166   0.4563    * 
## EE_exerciseNo exercise                -0.1048   0.2447      
## Age_EE_exposureAdult                   0.1514   0.8233   ** 
## Type_stress_exposureMS                -0.1377   0.6527      
## Type_stress_exposureNoise             -0.3973   0.6153      
## Type_stress_exposureRestraint         -0.8653  -0.2597  *** 
## Age_stress_exposureEarly postnatal    -0.4022   0.2766      
## Stress_durationChronic                -0.6270  -0.1496   ** 
## Stress_vs_EE_timingEnrichment first   -0.2547   0.5492      
## Stress_vs_EE_timingStress first       -0.2379   0.4919      
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Marginal / conditional R2 of the full model
r2_ml(mod_ESfm) 
##   R2_marginal R2_coditional 
##     0.5970272     0.7162767
# Model selection over all moderator subsets.
# NOTE(review): mod_ESfm is fitted with REML; MuMIn's dredge normally expects
# ML fits when comparing models with different fixed effects — confirm that
# the eval(metafor:::.MuMIn) hook loaded at the top handles the refitting,
# or refit with method = "ML" before dredging.
res_ESfm <- dredge(mod_ESfm, trace=2)
## 
  |                                                                            
  |                                                                      |   0%
  |                                                                            
  |                                                                      |   1%
  |                                                                            
  |=                                                                     |   1%
  |                                                                            
  |=                                                                     |   2%
  |                                                                            
  |==                                                                    |   2%
  |                                                                            
  |==                                                                    |   3%
  |                                                                            
  |==                                                                    |   4%
  |                                                                            
  |===                                                                   |   4%
  |                                                                            
  |===                                                                   |   5%
  |                                                                            
  |====                                                                  |   5%
  |                                                                            
  |====                                                                  |   6%
  |                                                                            
  |=====                                                                 |   6%
  |                                                                            
  |=====                                                                 |   7%
  |                                                                            
  |=====                                                                 |   8%
  |                                                                            
  |======                                                                |   8%
  |                                                                            
  |======                                                                |   9%
  |                                                                            
  |=======                                                               |   9%
  |                                                                            
  |=======                                                               |  10%
  |                                                                            
  |=======                                                               |  11%
  |                                                                            
  |========                                                              |  11%
  |                                                                            
  |========                                                              |  12%
  |                                                                            
  |=========                                                             |  12%
  |                                                                            
  |=========                                                             |  13%
  |                                                                            
  |==========                                                            |  14%
  |                                                                            
  |==========                                                            |  15%
  |                                                                            
  |===========                                                           |  15%
  |                                                                            
  |===========                                                           |  16%
  |                                                                            
  |============                                                          |  17%
  |                                                                            
  |============                                                          |  18%
  |                                                                            
  |=============                                                         |  18%
  |                                                                            
  |=============                                                         |  19%
  |                                                                            
  |==============                                                        |  19%
  |                                                                            
  |==============                                                        |  20%
  |                                                                            
  |==============                                                        |  21%
  |                                                                            
  |===============                                                       |  21%
  |                                                                            
  |===============                                                       |  22%
  |                                                                            
  |================                                                      |  22%
  |                                                                            
  |================                                                      |  23%
  |                                                                            
  |================                                                      |  24%
  |                                                                            
  |=================                                                     |  24%
  |                                                                            
  |=================                                                     |  25%
  |                                                                            
  |==================                                                    |  25%
  |                                                                            
  |==================                                                    |  26%
  |                                                                            
  |===================                                                   |  26%
  |                                                                            
  |===================                                                   |  27%
  |                                                                            
  |===================                                                   |  28%
  |                                                                            
  |====================                                                  |  28%
  |                                                                            
  |====================                                                  |  29%
  |                                                                            
  |=====================                                                 |  29%
  |                                                                            
  |=====================                                                 |  30%
  |                                                                            
  |=====================                                                 |  31%
  |                                                                            
  |======================                                                |  31%
  |                                                                            
  |======================                                                |  32%
  |                                                                            
  |=======================                                               |  32%
  |                                                                            
  |=======================                                               |  33%
  |                                                                            
  |========================                                              |  34%
  |                                                                            
  |========================                                              |  35%
  |                                                                            
  |=========================                                             |  35%
  |                                                                            
  |=========================                                             |  36%
  |                                                                            
  |==========================                                            |  37%
  |                                                                            
  |==========================                                            |  38%
  |                                                                            
  |===========================                                           |  38%
  |                                                                            
  |===========================                                           |  39%
  |                                                                            
  |============================                                          |  39%
  |                                                                            
  |============================                                          |  40%
  |                                                                            
  |============================                                          |  41%
  |                                                                            
  |=============================                                         |  41%
  |                                                                            
  |=============================                                         |  42%
  |                                                                            
  |==============================                                        |  42%
  |                                                                            
  |==============================                                        |  43%
  |                                                                            
  |==============================                                        |  44%
  |                                                                            
  |===============================                                       |  44%
  |                                                                            
  |===============================                                       |  45%
  |                                                                            
  |================================                                      |  45%
  |                                                                            
  |================================                                      |  46%
  |                                                                            
  |=================================                                     |  46%
  |                                                                            
  |=================================                                     |  47%
  |                                                                            
  |=================================                                     |  48%
  |                                                                            
  |==================================                                    |  48%
  |                                                                            
  |==================================                                    |  49%
  |                                                                            
  |===================================                                   |  49%
  |                                                                            
  |===================================                                   |  50%
  |                                                                            
  |===================================                                   |  51%
  |                                                                            
  |====================================                                  |  51%
  |                                                                            
  |====================================                                  |  52%
  |                                                                            
  |=====================================                                 |  52%
  |                                                                            
  |=====================================                                 |  53%
  |                                                                            
  |=====================================                                 |  54%
  |                                                                            
  |======================================                                |  54%
  |                                                                            
  |======================================                                |  55%
  |                                                                            
  |=======================================                               |  55%
  |                                                                            
  |=======================================                               |  56%
  |                                                                            
  |========================================                              |  56%
  |                                                                            
  |========================================                              |  57%
  |                                                                            
  |========================================                              |  58%
  |                                                                            
  |=========================================                             |  58%
  |                                                                            
  |=========================================                             |  59%
  |                                                                            
  |==========================================                            |  59%
  |                                                                            
  |==========================================                            |  60%
  |                                                                            
  |==========================================                            |  61%
  |                                                                            
  |===========================================                           |  61%
  |                                                                            
  |===========================================                           |  62%
  |                                                                            
  |============================================                          |  62%
  |                                                                            
  |============================================                          |  63%
  |                                                                            
  |=============================================                         |  64%
  |                                                                            
  |=============================================                         |  65%
  |                                                                            
  |==============================================                        |  65%
  |                                                                            
  |==============================================                        |  66%
  |                                                                            
  |===============================================                       |  67%
  |                                                                            
  |===============================================                       |  68%
  |                                                                            
  |================================================                      |  68%
  |                                                                            
  |================================================                      |  69%
  |                                                                            
  |=================================================                     |  69%
  |                                                                            
  |=================================================                     |  70%
  |                                                                            
  |=================================================                     |  71%
  |                                                                            
  |==================================================                    |  71%
  |                                                                            
  |==================================================                    |  72%
  |                                                                            
  |===================================================                   |  72%
  |                                                                            
  |===================================================                   |  73%
  |                                                                            
  |===================================================                   |  74%
  |                                                                            
  |====================================================                  |  74%
  |                                                                            
  |====================================================                  |  75%
  |                                                                            
  |=====================================================                 |  75%
  |                                                                            
  |=====================================================                 |  76%
  |                                                                            
  |======================================================                |  76%
  |                                                                            
  |======================================================                |  77%
  |                                                                            
  |======================================================                |  78%
  |                                                                            
  |=======================================================               |  78%
  |                                                                            
  |=======================================================               |  79%
  |                                                                            
  |========================================================              |  79%
  |                                                                            
  |========================================================              |  80%
  |                                                                            
  |========================================================              |  81%
  |                                                                            
  |=========================================================             |  81%
  |                                                                            
  |=========================================================             |  82%
  |                                                                            
  |==========================================================            |  82%
  |                                                                            
  |==========================================================            |  83%
  |                                                                            
  |===========================================================           |  84%
  |                                                                            
  |===========================================================           |  85%
  |                                                                            
  |============================================================          |  85%
  |                                                                            
  |============================================================          |  86%
  |                                                                            
  |=============================================================         |  87%
  |                                                                            
  |=============================================================         |  88%
  |                                                                            
  |==============================================================        |  88%
  |                                                                            
  |==============================================================        |  89%
  |                                                                            
  |===============================================================       |  89%
  |                                                                            
  |===============================================================       |  90%
  |                                                                            
  |===============================================================       |  91%
  |                                                                            
  |================================================================      |  91%
  |                                                                            
  |================================================================      |  92%
  |                                                                            
  |=================================================================     |  92%
  |                                                                            
  |=================================================================     |  93%
  |                                                                            
  |=================================================================     |  94%
  |                                                                            
  |==================================================================    |  94%
  |                                                                            
  |==================================================================    |  95%
  |                                                                            
  |===================================================================   |  95%
  |                                                                            
  |===================================================================   |  96%
  |                                                                            
  |====================================================================  |  96%
  |                                                                            
  |====================================================================  |  97%
  |                                                                            
  |====================================================================  |  98%
  |                                                                            
  |===================================================================== |  98%
  |                                                                            
  |===================================================================== |  99%
  |                                                                            
  |======================================================================|  99%
  |                                                                            
  |======================================================================| 100%
# Retain candidate models within 6 AICc units of the best model; keep the
# Akaike weights from the full model set rather than recalculating them
res_ESfm2<- subset(res_ESfm, delta <= 6, recalc.weights=FALSE)
# Relative importance of each moderator: sum of Akaike weights over the models
# that contain it. NOTE(review): MuMIn::importance() is deprecated in recent
# MuMIn releases in favour of sw() — confirm against the installed version.
importance(res_ESfm2) 
##                      Learning_vs_memory Stress_duration Age_EE_exposure
## Sum of weights:      0.81               0.75            0.33           
## N containing models:   44                 37              20           
##                      EE_social Age_stress_exposure EE_exercise
## Sum of weights:      0.24      0.20                0.19       
## N containing models:   17        10                  13       
##                      Type_stress_exposure Type_learning Stress_vs_EE_timing
## Sum of weights:      0.18                 0.10          0.05               
## N containing models:   12                   10             4               
##                      Appetitive_vs_aversive
## Sum of weights:      0.01                  
## N containing models:    2

Publication bias & sensitivity analysis

# Funnel plot of effect sizes vs. standard errors for a visual check of
# small-study effects / publication bias
Funnel_ES<-funnel(mod_ESfm, xlab = "lnRR", ylab = "Standard Error")

#year published was scaled previously under stress PB

# sqrt of the inverse "effective sample size" across the four treatment groups;
# used as the precision predictor in the Egger-style multilevel regression below
dat_ESfm$sqrt_inv_e_n <- with(dat_ESfm, sqrt(1/CC_n + 1/EC_n + 1/ES_n + 1/CS_n))

# Multilevel meta-regression test for publication bias: sqrt_inv_e_n tests
# small-study effects, Year_published tests time-lag bias, with key moderators
# included to adjust for heterogeneity
PB_MR_ES<- rma.mv(lnRR_ESa, lnRRV_ES, mods = ~1 + sqrt_inv_e_n +  Year_published + Type_learning + Appetitive_vs_aversive + EE_social + EE_exercise + Age_stress_exposure, random = list(~1|Study_ID,
                                                          ~1|ES_ID,
                                                          ~1|Strain), method = "REML", test = "t", 
    data = dat_ESfm)

# Coefficient estimates with 95% CIs from the publication-bias meta-regression
estimates_PB_MR_ES<- estimates.CI(PB_MR_ES)
estimates_PB_MR_ES
estimate mean lower upper
intrcpt 44.1112313 -25.6589074 113.8813700
sqrt_inv_e_n -0.6897056 -1.8750276 0.4956163
Year_published -0.0216674 -0.0562695 0.0129347
Type_learningHabituation 0.2280781 -0.4019047 0.8580609
Type_learningRecognition -0.0104748 -0.5598110 0.5388614
Appetitive_vs_aversiveAversive 0.0003694 -0.4348552 0.4355939
Appetitive_vs_aversiveNot applicable -0.2293071 -0.9140504 0.4554362
EE_socialSocial 0.1166764 -0.2532547 0.4866075
EE_exerciseNo exercise 0.1207767 -0.2456524 0.4872057
Age_stress_exposureEarly postnatal 0.1583410 -0.2028311 0.5195131
Age_stress_exposurePrenatal 0.1954658 -0.3134537 0.7043854
# Both CIs span zero: no evidence of small-study effects (effective sample
# size) or time-lag bias (year published)

#leave-one-out analysis
# Leave-one-out sensitivity analysis: refit the intercept-only multilevel
# model once per study, each time excluding that study, to check whether any
# single study drives the pooled enrichment effect.
dat$Study_ID <- as.factor(dat$Study_ID)

# seq_along() instead of 1:length() (which yields c(1, 0) for zero levels),
# and preallocate the result list rather than growing it.
study_levels <- levels(dat$Study_ID)
LeaveOneOut_effectsize <- vector("list", length(study_levels))
for (i in seq_along(study_levels)) {
  LeaveOneOut_effectsize[[i]] <- rma.mv(yi = lnRR_Ea, V = lnRRV_E,
                                        random = list(~1 | Study_ID, ~1 | ES_ID, ~1 | Strain),
                                        method = "REML",
                                        data = dat[dat$Study_ID != study_levels[i], ])
}


# Extract the pooled estimate and its 95% confidence bounds from a fitted
# rma.mv model object, returned as a one-row data frame with columns
# est, lower, and upper.
est.func <- function(mod_E0) {
  data.frame(
    est   = mod_E0$b,
    lower = mod_E0$ci.lb,
    upper = mod_E0$ci.ub
  )
}


# Collect the leave-one-out estimates into a single data frame, labelling
# each row with the study omitted from that particular fit.
MA_CVR <- LeaveOneOut_effectsize %>%
  lapply(est.func) %>%
  bind_rows() %>%
  mutate(left_out = levels(dat$Study_ID))

# Freeze the current row order as the factor levels so ggplot keeps the
# studies in this order instead of sorting them alphabetically.
MA_CVR$left_out <- factor(as.factor(MA_CVR$left_out), levels = MA_CVR$left_out)

# Caterpillar-style plot of the pooled lnRR as each study is left out, with
# the full-model estimate (solid line) and its 95% CI (dotted lines) as
# reference, plus a dashed line at zero.
leaveoneout_ES <- ggplot(MA_CVR) +
  geom_hline(yintercept = 0, lty = 2, lwd = 1) +
  geom_hline(yintercept = mod_E0$ci.lb, lty = 3, lwd = 0.75, colour = "black") +
  geom_hline(yintercept = mod_E0$b, lty = 1, lwd = 0.75, colour = "black") +
  geom_hline(yintercept = mod_E0$ci.ub, lty = 3, lwd = 0.75, colour = "black") +
  geom_pointrange(aes(x = left_out, y = est, ymin = lower, ymax = upper)) +
  xlab("Study left out") +
  ylab("lnRR, 95% CI") +
  coord_flip() +
  # BUG FIX: theme_bw() is a complete theme and resets every element, so it
  # must come BEFORE the theme() tweaks; the original called
  # theme(panel.grid.minor = element_blank()) first, which theme_bw() then
  # silently discarded.
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.text.y = element_text(size = 6))

leaveoneout_ES

dat$Study_ID <- as.integer(dat$Study_ID)

Combined orchard plot

# Combined orchard plot: the three intercept-only models (enrichment main
# effect, stress main effect, interaction) shown on a single panel.
mod_list1 <- list(mod_E0, mod_S0, mod_ES0)

# Intercept-only results table for each model (orchaRd)
mod_res1 <- lapply(mod_list1, function(x) mod_results(x, mod = "Int"))

# Merge the three result objects; use TRUE rather than T (T is a reassignable
# binding, not a reserved word)
merged1 <- submerge(mod_res1[[3]], mod_res1[[2]], mod_res1[[1]], mix = TRUE)
# Relabel the generic intercept names with informative panel labels
merged1$mod_table$name <- factor(merged1$mod_table$name,
    levels = c("Intrcpt1", "Intrcpt2", "Intrcpt3"),
    labels = rev(c("Enrichment ME", "Stress ME", "Interaction")))

merged1$data$moderator <- factor(merged1$data$moderator,
    levels = c("Intrcpt1", "Intrcpt2", "Intrcpt3"),
    labels = rev(c("Enrichment ME", "Stress ME", "Interaction")))

orchard1 <- orchard_plot(merged1, mod = "Int", xlab = "lnRR", angle = 0) +
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  xlim(-2, 4.5) +
  geom_point(aes(fill = name), size = 5, shape = 21) + # mean estimates
  scale_size_continuous(range = c(1, 7)) + # point scaling by precision
  theme(panel.border = element_rect(colour = "black", fill = NA, size = 1.3), # border around the plot
        text = element_text(size = 15), # font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10))

orchard1

Age of enrichment

The age at which the individuals were exposed to environmental enrichment.
This needs to be redone after recategorising

# Moderator model: age at enrichment exposure. The -1 removes the intercept
# so each age category gets its own mean estimate; t-tests for coefficients.
mod_ES9 <- rma.mv(yi = lnRR_ESa, V = lnRRV_ES, mod = ~Age_EE_exposure-1, random = list(~1|Study_ID,
                                                                                       ~1|ES_ID,
                                                                                       ~1|Strain),
                  test = "t",
                  data = dat)

summary(mod_ES9) 
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -47.5384   95.0767  107.0767  122.0086  108.1011   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0450  0.2122     30     no  Study_ID 
## sigma^2.2  0.0194  0.1393     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Residual Heterogeneity:
## QE(df = 89) = 302.2511, p-val < .0001
## 
## Test of Moderators (coefficients 1:3):
## F(df1 = 3, df2 = 89) = 4.6525, p-val = 0.0046
## 
## Model Results:
## 
##                                 estimate      se     tval  df    pval    ci.lb 
## Age_EE_exposureAdolescent         0.2006  0.0599   3.3519  89  0.0012   0.0817 
## Age_EE_exposureAdult              0.1526  0.0979   1.5582  89  0.1227  -0.0420 
## Age_EE_exposureEarly postnatal   -0.1674  0.3086  -0.5424  89  0.5889  -0.7805 
##                                  ci.ub 
## Age_EE_exposureAdolescent       0.3196  ** 
## Age_EE_exposureAdult            0.3472     
## Age_EE_exposureEarly postnatal  0.4457     
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Marginal and conditional R^2 for the age-of-enrichment moderator model
r2_ml(mod_ES9) 
##   R2_marginal R2_coditional 
##      0.082042      1.000000
# Orchard plot of the age-of-enrichment estimates 
orchard_plot(mod_ES9, mod = "Age_EE_exposure", xlab = "lnRR", alpha=0.4) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 24), # change font sizes
        legend.title = element_text(size = 15),
        legend.text = element_text(size = 13)) 

‘Pairwise effect sizes’

Comparing ES relative to stress, we can see that the increase is about 18%, whereas ES relative to control is 12% (i.e., this difference reflects the interaction).

We can also see that ES relative to control is 9%, so these individuals actually do better than baseline individuals. There is also no significant difference between ES and EC individuals (although the estimate is slightly lower), meaning that enrichment in the presence of stress can result in learning and memory that is just as good as that of enriched individuals without stress

Enrichment relative to control

# Variance-covariance matrix for the E2 (enrichment vs. control) effect sizes,
# assuming r = 0.5 correlation among effect sizes within a study (clubSandwich)
VCV_E20 <- impute_covariance_matrix(vi = dat$lnRRV_E2, cluster = dat$Study_ID, r = 0.5)

# NOTE(review): the original comment said "Model doesn't converge with VCV",
# yet VCV_E20 IS passed as V below — presumably the default optimizer failed
# to converge, hence the switch to optim/Nelder-Mead; confirm intent.
mod_E20 <- rma.mv(yi = lnRR_E2a, V = VCV_E20, random = list(~1|Study_ID,
                                                             ~ 1|Strain,
                                                             ~1|ES_ID),
                 test = "t", 
                 data = dat, 
                 control=list(optimizer="optim", optmethod="Nelder-Mead"))

summary(mod_E20) 
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -80.4236  160.8471  168.8471  178.8905  169.3122   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0209  0.1444     30     no  Study_ID 
## sigma^2.2  0.0000  0.0001      6     no    Strain 
## sigma^2.3  0.0000  0.0000     92     no     ES_ID 
## 
## Test for Heterogeneity:
## Q(df = 91) = 23.4395, p-val = 1.0000
## 
## Model Results:
## 
## estimate      se    tval  df    pval    ci.lb   ci.ub 
##   0.1376  0.0729  1.8860  91  0.0625  -0.0073  0.2825  . 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
i2_ml(mod_E20) 
##     I2_total  I2_Study_ID    I2_Strain     I2_ES_ID 
## 8.461245e-02 8.461241e-02 4.008740e-08 0.000000e+00
orchard_plot(mod_E20, mod = "Int", xlab = "lnRR")

Stress relative to control

# Within-study sampling variance-covariance matrix for the stress-vs-control
# lnRR, assuming r = 0.5 correlation among effect sizes from the same study
VCV_S20 <- impute_covariance_matrix(vi = dat$lnRRV_S2, cluster = dat$Study_ID, r = 0.5)

# Intercept-only (overall mean) model: stress relative to control
mod_S20 <- rma.mv(yi = lnRR_S2a, V = VCV_S20, random = list(~1|Study_ID, 
                                                             ~ 1|Strain,
                                                             ~1|ES_ID),
                 test = "t",
                 data = dat)

summary(mod_S20) 
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -89.4392  178.8785  186.8785  196.9219  187.3436   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0300  0.1732     30     no  Study_ID 
## sigma^2.2  0.0000  0.0000      6     no    Strain 
## sigma^2.3  0.0000  0.0000     92     no     ES_ID 
## 
## Test for Heterogeneity:
## Q(df = 91) = 37.2472, p-val = 1.0000
## 
## Model Results:
## 
## estimate      se     tval  df    pval    ci.lb   ci.ub 
##  -0.0735  0.0790  -0.9307  91  0.3545  -0.2304  0.0834    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Multilevel I^2: heterogeneity attributable to each random-effect level
i2_ml(mod_S20) 
##     I2_total  I2_Study_ID    I2_Strain     I2_ES_ID 
## 1.143516e-01 1.143515e-01 7.749001e-09 0.000000e+00
# Orchard plot of the overall (intercept-only) estimate
orchard_plot(mod_S20, mod = "Int", xlab = "lnRR")

Enrichment + stress relative to control

# Within-study sampling variance-covariance matrix for the
# enrichment+stress-vs-control lnRR, r = 0.5 within-study correlation assumed
VCV_ES20 <- impute_covariance_matrix(vi = dat$lnRRV_ES2, cluster = dat$Study_ID, r = 0.5)

# Intercept-only (overall mean) model: enrichment + stress relative to control
mod_ES20 <- rma.mv(yi = lnRR_ES2a, V = VCV_ES20, random = list(~1|Study_ID,
                                                                ~ 1|Strain,
                                                                ~1|ES_ID),
                 test = "t",
                 data = dat)
summary(mod_ES20) 
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -86.3233  172.6465  180.6465  190.6900  181.1116   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0238  0.1541     30     no  Study_ID 
## sigma^2.2  0.0000  0.0000      6     no    Strain 
## sigma^2.3  0.0000  0.0000     92     no     ES_ID 
## 
## Test for Heterogeneity:
## Q(df = 91) = 29.6180, p-val = 1.0000
## 
## Model Results:
## 
## estimate      se    tval  df    pval    ci.lb   ci.ub 
##   0.1100  0.0772  1.4247  91  0.1577  -0.0434  0.2634    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Multilevel I^2: heterogeneity attributable to each random-effect level
i2_ml(mod_ES20) 
##     I2_total  I2_Study_ID    I2_Strain     I2_ES_ID 
## 8.967119e-02 8.967119e-02 4.077839e-10 1.167794e-10
# Orchard plot of the overall (intercept-only) estimate
orchard_plot(mod_ES20, mod = "Int", xlab = "lnRR")

Enrichment + stress relative to stress

# Within-study sampling variance-covariance matrix for the
# enrichment+stress-vs-stress lnRR, r = 0.5 within-study correlation assumed
VCV_E30 <- impute_covariance_matrix(vi = dat$lnRRV_E3, cluster = dat$Study_ID, r = 0.5)

# Intercept-only (overall mean) model: enrichment + stress relative to stress
mod_E30 <- rma.mv(yi = lnRR_E3a, V = VCV_E30, random = list(~1|Study_ID,
                                                             ~ 1|Strain,
                                                             ~1|ES_ID),
                  test = "t",
                  data = dat)
summary(mod_E30) 
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -90.8497  181.6995  189.6995  199.7429  190.1646   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0097  0.0986     30     no  Study_ID 
## sigma^2.2  0.0000  0.0001      6     no    Strain 
## sigma^2.3  0.0000  0.0000     92     no     ES_ID 
## 
## Test for Heterogeneity:
## Q(df = 91) = 27.4127, p-val = 1.0000
## 
## Model Results:
## 
## estimate      se    tval  df    pval    ci.lb   ci.ub 
##   0.1293  0.0674  1.9192  91  0.0581  -0.0045  0.2632  . 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Multilevel I^2: heterogeneity attributable to each random-effect level
i2_ml(mod_E30) 
##     I2_total  I2_Study_ID    I2_Strain     I2_ES_ID 
## 4.001741e-02 4.001738e-02 2.945906e-08 1.186013e-14
# Orchard plot of the overall (intercept-only) estimate
orchard_plot(mod_E30, mod = "Int", xlab = "lnRR")

Enrichment + stress relative to enrichment

# Within-study sampling variance-covariance matrix for the
# enrichment+stress-vs-enrichment lnRR, r = 0.5 within-study correlation assumed
VCV_S30 <- impute_covariance_matrix(vi = dat$lnRRV_S3, cluster = dat$Study_ID, r = 0.5)

# Intercept-only (overall mean) model: enrichment + stress relative to
# enrichment. Uses the optim/Nelder-Mead optimizer -- presumably the default
# optimizer failed to converge, as for mod_E20 above (NOTE(review): confirm)
mod_S30 <- rma.mv(yi = lnRR_S3a, V = VCV_S30, random = list(~1|Study_ID,
                                                             ~ 1|Strain,
                                                             ~1|ES_ID),
                  test = "t",
                  data = dat,
                   control=list(optimizer="optim", optmethod="Nelder-Mead"))
summary(mod_S30) 
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##   logLik  Deviance       AIC       BIC      AICc 
## -80.0114  160.0229  168.0229  178.0663  168.4880   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.0000  0.0000     30     no  Study_ID 
## sigma^2.2  0.0000  0.0000      6     no    Strain 
## sigma^2.3  0.0000  0.0000     92     no     ES_ID 
## 
## Test for Heterogeneity:
## Q(df = 91) = 15.4501, p-val = 1.0000
## 
## Model Results:
## 
## estimate      se     tval  df    pval    ci.lb   ci.ub 
##  -0.0120  0.0618  -0.1942  91  0.8465  -0.1347  0.1107    
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Multilevel I^2: all variance components are zero here, so I^2 is zero
i2_ml(mod_S30) 
##    I2_total I2_Study_ID   I2_Strain    I2_ES_ID 
##           0           0           0           0
# Orchard plot of the overall (intercept-only) estimate
orchard_plot(mod_S30, mod = "Int", xlab = "lnRR")

Combined orchard plot

# Combine the five pairwise intercept-only models into a single orchard plot.
# Order rearranged so that it matches the in-text results.
mod_list2 <- list(mod_S30, mod_E30, mod_ES20, mod_S20, mod_E20)

# Extract orchaRd model results from each intercept-only model
mod_res2 <- lapply(mod_list2, function(x) mod_results(x, mod = "Int"))

# Merge the five result sets into one object for plotting
# (TRUE spelled out rather than T, which is reassignable)
merged2 <- submerge(mod_res2[[1]], mod_res2[[2]], mod_res2[[3]], mod_res2[[4]],
                    mod_res2[[5]], mix = TRUE)

# Relabel the merged intercepts with the comparison each model represents.
# Levels/labels are defined once so the model table and the raw data are
# guaranteed to be relabelled identically.
int_levels <- c("Intrcpt1", "Intrcpt2", "Intrcpt3", "Intrcpt4", "Intrcpt5")
int_labels <- rev(c("EC/CC", "CS/CC", "ES/CC", "ES/CS", "ES/EC"))

merged2$mod_table$name <- factor(merged2$mod_table$name,
                                 levels = int_levels, labels = int_labels)

merged2$data$moderator <- factor(merged2$data$moderator,
                                 levels = int_levels, labels = int_labels)

# Orchard plot of all five pairwise comparisons, with prediction and
# confidence intervals layered on top and mean estimates as filled points
orchard2 <- orchard_plot(merged2, mod = "Int", xlab = "lnRR", angle = 0) + 
  geom_errorbarh(aes(xmin = lowerPR, xmax = upperPR), height = 0, show.legend = FALSE, size = 1.1, alpha = 0.5) + # prediction intervals
  geom_errorbarh(aes(xmin = lowerCL, xmax = upperCL), height = 0.05, show.legend = FALSE, size = 2) + # confidence intervals 
  xlim(-2,4.5) +
  geom_point(aes(fill = name),  size = 5, shape = 21)+ # mean estimate
  scale_size_continuous(range = c(1, 7))+ # change point scaling
  theme(panel.border = element_rect(colour = "black", fill=NA, size=1.3), # border around the plot
        text = element_text(size = 15), # change font sizes
        legend.title = element_text(size = 10),
        legend.text = element_text(size = 10)) 

orchard2

## Panel of both orchard plots
# Side-by-side panel of the two combined orchard plots, tagged A/B via
# patchwork (orchard1 is built earlier in the file)
p1 <- orchard1 + orchard2 +  plot_annotation(tag_levels = 'A')
p1

Panel of meta-regressions

# Panels of the meta-regression orchard plots built earlier in the file,
# assembled with patchwork (E_mod, S_mod) and cowplot (ES_mod)

# Enrichment moderators: 2 rows x 3 columns, tagged A-F
E_mod <- (LvsM_E + Learning_E + Reinforcement_E)/ (Age_E + Exercise_E + Social_E) +  plot_annotation(tag_levels = 'A')

# Stress moderators: 2 rows x 3 columns, tagged A-F
S_mod <- (LvsM_S + Learning_S + Reinforcement_S) / (Age_S + Stressor + Duration_S) + plot_annotation(tag_levels = 'A')

# Interaction (enrichment + stress) moderators: ten panels in a 5-column grid
ES_mod <- plot_grid(LvsM_ES, Learning_ES, Reinforcement_ES, Age_enrichment_ES, Age_stress_ES, Order_ES, Exercise_ES, Social_ES, Stressor_ES, Duration_ES,
  labels = "AUTO", ncol = 5)

Modelling with SMD

Stress

Intercept model

Why does the funnel plot look so strange and why is I2 higher?

# SMD intercept-only model: stress relative to control, with study,
# effect-size and strain random intercepts
# (VCV_Sa is the imputed sampling (co)variance matrix built earlier in the file)
mod_S0a <- rma.mv(yi = SMD_Sa, V = VCV_Sa, random = list(~1|Study_ID,
                                                         ~1|ES_ID,
                                                         ~1|Strain),
                test = "t",
                data = dat)
summary(mod_S0a) 
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##    logLik   Deviance        AIC        BIC       AICc 
## -121.4344   242.8688   250.8688   260.9122   251.3339   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.5602  0.7484     30     no  Study_ID 
## sigma^2.2  0.5409  0.7355     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Heterogeneity:
## Q(df = 91) = 28682.7938, p-val < .0001
## 
## Model Results:
## 
## estimate      se     tval  df    pval    ci.lb    ci.ub 
##  -0.4360  0.1627  -2.6795  91  0.0088  -0.7592  -0.1128  ** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Multilevel I^2: heterogeneity attributable to each random-effect level
i2_ml(mod_S0a) 
##     I2_total  I2_Study_ID     I2_ES_ID    I2_Strain 
## 9.979596e-01 5.076959e-01 4.902636e-01 2.171066e-09
funnel(mod_S0a) 

# Funnel plot with inverse standard error on the y-axis
# (stray trailing comma removed: it created an empty argument in the call)
funnel(mod_S0a, yaxis="seinv") 

Environmental Enrichment

Intercept model

Why does the funnel plot look so strange and why is I2 higher?

# SMD intercept-only model: environmental enrichment relative to control, with
# study, effect-size and strain random intercepts
# (VCV_Ea is the imputed sampling (co)variance matrix built earlier in the file)
mod_E0a <- rma.mv(yi = SMD_Ea, V = VCV_Ea, random = list(~1|Study_ID,
                                                         ~1|ES_ID, 
                                                         ~1|Strain),
                 test = "t",
                 data = dat)
summary(mod_E0a)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##    logLik   Deviance        AIC        BIC       AICc 
## -113.1872   226.3745   234.3745   244.4179   234.8396   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.3274  0.5722     30     no  Study_ID 
## sigma^2.2  0.4890  0.6993     92     no     ES_ID 
## sigma^2.3  0.0000  0.0000      6     no    Strain 
## 
## Test for Heterogeneity:
## Q(df = 91) = 12552.8093, p-val < .0001
## 
## Model Results:
## 
## estimate      se    tval  df    pval   ci.lb   ci.ub 
##   0.7290  0.1333  5.4689  91  <.0001  0.4642  0.9938  *** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Multilevel I^2: heterogeneity attributable to each random-effect level
i2_ml(mod_E0a)
##     I2_total  I2_Study_ID     I2_ES_ID    I2_Strain 
## 9.972502e-01 3.999606e-01 5.972896e-01 1.643432e-09
funnel(mod_E0a) 

Interaction

Intercept model

Why does the funnel plot look so strange and why is I2 higher?

# SMD intercept-only model: enrichment + stress (interaction arm) relative to
# control, with study, effect-size and strain random intercepts
# (VCV_ESa is the imputed sampling (co)variance matrix built earlier in the file)
mod_ES0a <- rma.mv(yi = SMD_ESa, V = VCV_ESa, random = list(~1|Study_ID,
                                                            ~1|ES_ID,
                                                            ~1|Strain),
                  test = "t",
                  data = dat)
summary(mod_ES0a)
## 
## Multivariate Meta-Analysis Model (k = 92; method: REML)
## 
##    logLik   Deviance        AIC        BIC       AICc 
## -126.8085   253.6170   261.6170   271.6604   262.0821   
## 
## Variance Components:
## 
##             estim    sqrt  nlvls  fixed    factor 
## sigma^2.1  0.7153  0.8457     30     no  Study_ID 
## sigma^2.2  0.5861  0.7656     92     no     ES_ID 
## sigma^2.3  0.0000  0.0001      6     no    Strain 
## 
## Test for Heterogeneity:
## Q(df = 91) = 11648.6783, p-val < .0001
## 
## Model Results:
## 
## estimate      se    tval  df    pval   ci.lb   ci.ub 
##   0.6961  0.1810  3.8449  91  0.0002  0.3365  1.0557  *** 
## 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
i2_ml(mod_ES0a) # I2 is very high (~99.3% total, per output below)
##     I2_total  I2_Study_ID     I2_ES_ID    I2_Strain 
## 9.931281e-01 5.458585e-01 4.472695e-01 1.577079e-08
funnel(mod_ES0a)